#include <Request_Interceptor.h>


Public Member Functions:
  virtual void   send_request (PortableInterceptor::ClientRequestInfo_ptr ri)
  virtual void   send_poll (PortableInterceptor::ClientRequestInfo_ptr ri)
  virtual void   receive_reply (PortableInterceptor::ClientRequestInfo_ptr ri)
  virtual void   receive_exception (PortableInterceptor::ClientRequestInfo_ptr ri)
  virtual void   receive_other (PortableInterceptor::ClientRequestInfo_ptr ri)
  virtual char * name (void)
  virtual void   destroy (void)

Static Public Attributes:
  static const IOP::ServiceId SchedulingInfo = 30

Definition at line 20 of file Request_Interceptor.h.
| void Client_Interceptor::destroy | ( | void | ) | [virtual] |
Definition at line 239 of file Request_Interceptor.cpp.
{
  // Intentionally empty: this interceptor has nothing to release on
  // destruction.
}
| char * Client_Interceptor::name | ( | void | ) | [virtual] |
Definition at line 233 of file Request_Interceptor.cpp.
{
  // Return the interceptor's registration name (caller owns the copy,
  // per the PortableInterceptor::Interceptor::name() contract).
  //
  // Fixed misspelling: the original returned "RTSchdeuler_Client_Interceptor".
  // NOTE(review): any code that resolves this interceptor by the old,
  // misspelled string must be updated to match -- verify against callers.
  return CORBA::string_dup ("RTScheduler_Client_Interceptor");
}
| void Client_Interceptor::receive_exception | ( | PortableInterceptor::ClientRequestInfo_ptr | ri | ) | [virtual] |
Definition at line 158 of file Request_Interceptor.cpp.
{
  if (TAO_debug_level > 0)
    ACE_DEBUG ((LM_DEBUG,
                "Client_Interceptor::receive_exception\n"));

  // Fetch the RTScheduler current implementation from thread-specific
  // storage; absent one, no distributable thread is active and there
  // is nothing to do.
  TAO_TSS_Resources *tss = TAO_TSS_Resources::instance ();
  TAO_RTScheduler_Current_i *current =
    static_cast<TAO_RTScheduler_Current_i *> (tss->rtscheduler_current_impl_);

  if (current != 0)
    {
      if (ri == 0)
        {
          ACE_ERROR ((LM_ERROR,
                      "ri = 0\n"));
          return;
        }

      // Identify the received exception via its TypeCode repository ID.
      CORBA::Any_var ex = ri->received_exception ();
      CORBA::TypeCode_var type = ex->type ();

      if (CORBA::is_nil (type.in ()))
        {
          ACE_ERROR ((LM_ERROR,
                      "type = 0\n"));
          return;
        }

      const char * id = type->id ();

      if (TAO_debug_level > 0)
        ACE_DEBUG ((LM_DEBUG,
                    "Received Exception %C\n",
                    id));

      // If the remote host threw a THREAD_CANCELLED exception, take the
      // appropriate local action.
      //
      // Fix 1: the original test was inverted -- it cancelled the thread
      // when strstr() returned 0 (i.e. when the id did NOT contain the
      // needle), contradicting the comments on both branches.
      // Fix 2: a repository ID has the form
      // "IDL:omg.org/CORBA/THREAD_CANCELLED:1.0", so the original needle
      // "CORBA::THREAD_CANCELLED" could never match; search for the
      // unscoped exception name instead.
      if (ACE_OS::strstr (id, "THREAD_CANCELLED") != 0)
        {
          // Perform the necessary cleanup as the
          // thread was cancelled.
          current->cancel_thread ();
        }
      else
        {
          // Inform scheduler that exception was
          // received.
          RTScheduling::Scheduler_var scheduler = current->scheduler ();
          scheduler->receive_exception (ri);
        }
    }
}
| void Client_Interceptor::receive_other | ( | PortableInterceptor::ClientRequestInfo_ptr | ri | ) | [virtual] |
Definition at line 214 of file Request_Interceptor.cpp.
{
  if (TAO_debug_level > 0)
    ACE_DEBUG ((LM_DEBUG,
                "Client_Interceptor::receive_other\n"));

  // Look up the RTScheduler current implementation in thread-specific
  // storage; it is only set while a distributable thread is active.
  TAO_TSS_Resources *tss_resources = TAO_TSS_Resources::instance ();
  TAO_RTScheduler_Current_i *rt_current =
    static_cast<TAO_RTScheduler_Current_i *> (
      tss_resources->rtscheduler_current_impl_);

  // No active distributable thread -- nothing to report.
  if (rt_current == 0)
    return;

  // Delegate the interception point to the scheduler.
  RTScheduling::Scheduler_var the_scheduler = rt_current->scheduler ();
  the_scheduler->receive_other (ri);
}
| void Client_Interceptor::receive_reply | ( | PortableInterceptor::ClientRequestInfo_ptr | ri | ) | [virtual] |
Definition at line 139 of file Request_Interceptor.cpp.
{
  if (TAO_debug_level > 0)
    ACE_DEBUG ((LM_DEBUG,
                "Client_Interceptor::receive_reply\n"));

  // The RTScheduler current implementation lives in thread-specific
  // storage; it is only present while a distributable thread is active.
  TAO_TSS_Resources *tss_resources = TAO_TSS_Resources::instance ();
  TAO_RTScheduler_Current_i *rt_current =
    static_cast<TAO_RTScheduler_Current_i *> (
      tss_resources->rtscheduler_current_impl_);

  // Without an active distributable thread there is nothing to do.
  if (rt_current == 0)
    return;

  // Delegate the reply to the scheduler.
  RTScheduling::Scheduler_var the_scheduler = rt_current->scheduler ();
  the_scheduler->receive_reply (ri);
}
| void Client_Interceptor::send_poll | ( | PortableInterceptor::ClientRequestInfo_ptr | ri | ) | [virtual] |
Definition at line 120 of file Request_Interceptor.cpp.
{
  if (TAO_debug_level > 0)
    ACE_DEBUG ((LM_DEBUG,
                "Client_Interceptor::send_poll\n"));

  // Retrieve the thread's RTScheduler current implementation, if any.
  TAO_TSS_Resources *tss_resources = TAO_TSS_Resources::instance ();
  TAO_RTScheduler_Current_i *rt_current =
    static_cast<TAO_RTScheduler_Current_i *> (
      tss_resources->rtscheduler_current_impl_);

  // Only forward the poll when a distributable thread is active.
  if (rt_current == 0)
    return;

  RTScheduling::Scheduler_var the_scheduler = rt_current->scheduler ();
  the_scheduler->send_poll (ri);
}
| void Client_Interceptor::send_request | ( | PortableInterceptor::ClientRequestInfo_ptr | ri | ) | [virtual] |
Definition at line 25 of file Request_Interceptor.cpp.
{
  if (TAO_debug_level > 0)
    ACE_DEBUG ((LM_DEBUG,
                "Client_Interceptor::send_request\n"));

  // Temporary current installed only for the duration of a oneway request.
  TAO_RTScheduler_Current_i *new_current = 0;
  TAO_RTScheduler_Current_i *current = 0;
  TAO_TSS_Resources *tss = TAO_TSS_Resources::instance ();
  current =
    static_cast<TAO_RTScheduler_Current_i *> (tss->rtscheduler_current_impl_);

  if (current != 0)
    {
      // If this is a one way request
      if (!ri->response_expected ())
        {
          // Generate GUID from the global counter.
          //
          // Fix: the original sized the sequence with sizeof(long) but
          // copied sizeof(size_t) bytes into it -- a buffer overrun on
          // platforms where long is narrower than size_t (e.g. 64-bit
          // Windows / LLP64).  Size and copy consistently with size_t.
          RTScheduling::Current::IdType guid;
          guid.length (sizeof (size_t));

          size_t temp = ++TAO_RTScheduler_Current::guid_counter;
          ACE_OS::memcpy (guid.get_buffer (),
                          &temp,
                          sizeof (size_t));

          // Read the GUID back out, mainly for the debug trace below.
          size_t id;
          ACE_OS::memcpy (&id,
                          guid.get_buffer (),
                          guid.length ());

          if (TAO_debug_level > 0)
            ACE_DEBUG ((LM_DEBUG,
                        "The Guid is %d %d\n",
                        id,
                        TAO_RTScheduler_Current::guid_counter.value_i ()));

          // Create new DT.
          RTScheduling::DistributableThread_var dt =
            TAO_DistributableThread_Factory::create_DT ();

          // Add new DT to map; a failed bind means no scheduling segment
          // context exists for this thread.
          int result = current->dt_hash ()->bind (guid, dt);
          if (result != 0)
            {
              ACE_DEBUG ((LM_DEBUG,
                          "No Scheduling Segment Context\n"));
              throw ::CORBA::INTERNAL ();
            }

          // @@ Store implicit_sched_param in a var
          // Create new temporary current.  Note that the new
          // <sched_param> is the current <implicit_sched_param> and
          // there is no segment name.
          CORBA::Policy_var implicit_sched_param =
            current->implicit_scheduling_parameter ();
          ACE_NEW (new_current,
                   TAO_RTScheduler_Current_i (current->orb (),
                                              current->dt_hash (),
                                              guid,
                                              0,
                                              implicit_sched_param.in (),
                                              0,
                                              dt.in (),
                                              current));

          // Install new current in the ORB.
          //current->implementation (new_current);
          tss->rtscheduler_current_impl_ = new_current;
        }

      // Scheduler populates the service context with
      // scheduling parameters.
      RTScheduling::Scheduler_var scheduler = current->scheduler ();
      scheduler->send_request (ri);

      // If this is a one way request
      if (!ri->response_expected ())
        {
          // Cleanup temporary DT.
          new_current->cleanup_DT ();

          // Restore old current.
          new_current->cleanup_current ();
        }
    }
}
static const IOP::ServiceId Client_Interceptor::SchedulingInfo = 30
Definition at line 41 of file Request_Interceptor.h.
1.7.0