#include "hmbdc/Copyright.hpp"
#include "hmbdc/Config.hpp"
#include "hmbdc/app/utils/StuckClientPurger.hpp"
#include "hmbdc/numeric/BitMath.hpp"
#include "hmbdc/os/Allocators.hpp"

namespace hmbdc { namespace app {

namespace context_property {
/// Context template parameter indicating each message is sent to all clients
/// within the Context
template <uint16_t max_parallel_consumer = DEFAULT_HMBDC_CAPACITY>
struct broadcast {
    static_assert(max_parallel_consumer >= 4u, "..."); // full assertion elided
    // ...
};

// ... partition, ipc_creator and ipc_attacher context properties elided ...

} // namespace context_property
}} // namespace hmbdc::app
#include "hmbdc/app/ContextDetail.hpp"

namespace hmbdc { namespace app {

namespace context_detail {

/// covers the inter-thread and ipc communication facade
template <size_t MaxMessageSize, typename... ContextProperties>
struct /* ... */ {
    // ...
    enum {
        MAX_MESSAGE_SIZE = MaxMessageSize,
        BUFFER_VALUE_SIZE = MaxMessageSize + 8u,
    };

    size_t maxMessageSize() const {
        if (MaxMessageSize == 0) return maxMessageSizeRuntime_;
        return MaxMessageSize;
    }
    template <typename M0, typename M1, typename... Messages>
    typename enable_if<!std::is_integral<M1>::value, void>::type
    send(M0&& m0, M1&& m1, Messages&&... msgs) {
        auto n = sizeof...(msgs) + 2;
        auto it = buffer_.claim(n);
        sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1)
            , std::forward<Messages>(msgs)...);
        buffer_.commit(it, n);
    }
    template <typename M0, typename M1, typename... Messages>
    typename enable_if<!std::is_integral<M1>::value, bool>::type
    trySend(M0&& m0, M1&& m1, Messages&&... msgs) {
        auto n = sizeof...(msgs) + 2;
        auto it = buffer_.tryClaim(n);
        if (hmbdc_unlikely(!it)) return false;
        sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1)
            , std::forward<Messages>(msgs)...);
        buffer_.commit(it, n);
        return true;
    }
    template <typename ForwardIt>
    void send(ForwardIt begin, size_t n) {
        if (hmbdc_likely(n)) {
            auto bit = buffer_.claim(n);
            for (auto i = 0ul; i < n; i++) {
                using Message = typename iterator_traits<ForwardIt>::value_type;
                // ... wrap *begin++ into the claimed slot (elided)
            }
            buffer_.commit(bit, n);
        }
    }
    template <typename ForwardIt>
    bool trySend(ForwardIt begin, size_t n) {
        if (hmbdc_likely(n)) {
            auto bit = buffer_.tryClaim(n);
            if (hmbdc_unlikely(!bit)) return false;
            for (auto i = 0ul; i < n; i++) {
                using Message = typename iterator_traits<ForwardIt>::value_type;
                // ... wrap *begin++ into the claimed slot (elided)
            }
            buffer_.commit(bit, n);
        }
        return true;
    }
    template <typename Message>
    void send(Message&& m) {
        using M = typename std::remove_reference<Message>::type;
        static_assert(MAX_MESSAGE_SIZE == 0
            || sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
            && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        // ... put MessageWrap<M>(std::forward<Message>(m)) into buffer_ (elided)
    }
    template <typename Message>
    bool trySend(Message&& m) {
        using M = typename std::remove_reference<Message>::type;
        static_assert(MAX_MESSAGE_SIZE == 0
            || sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
            && sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        // ... try to put MessageWrap<M>(std::forward<Message>(m)) into buffer_ (elided)
    }
    template <typename Message, typename... Args>
    void sendInPlace(Args&&... args) {
        static_assert(MAX_MESSAGE_SIZE == 0
            || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
            && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        buffer_.template putInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
    }
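    // Usage sketch (illustrative only, not part of the original source; "MarketData"
    // stands for a hypothetical registered message type and ctx for the owning Context):
    //
    //   MarketData md{/* ... */};
    //   ctx.send(md);                         // claims a buffer slot (waits for space), copies the message in
    //   if (!ctx.trySend(md)) { /* buffer full - caller decides how to back off */ }
    //   ctx.sendInPlace<MarketData>(/* ctor args */);  // constructs the message directly in the claimed slot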
    // ctor (class name and some initializers elided by extraction)
    /* ... */(uint32_t messageQueueSizePower2Num
        , size_t maxMessageSizeRuntime
        , char const* shmName)
    : allocator_(/* ... */
        , Buffer::footprint(maxMessageSizeRuntime + 8u, messageQueueSizePower2Num)
        , O_RDWR | (cpa::create_ipc ? O_CREAT : 0))
    , bufferptr_(allocator_.template allocate<Buffer>(1
        , maxMessageSizeRuntime + 8u, messageQueueSizePower2Num
        /* ... */))
    , buffer_(*bufferptr_) {
        if (messageQueueSizePower2Num < 2) {
            HMBDC_THROW(std::out_of_range
                , "messageQueueSizePower2Num need >= 2");
        }
        if (MaxMessageSize && maxMessageSizeRuntime != MAX_MESSAGE_SIZE) {
            HMBDC_THROW(std::out_of_range
                , "can only set maxMessageSizeRuntime when template value MaxMessageSize is 0");
        }
        maxMessageSizeRuntime_ = maxMessageSizeRuntime;
        primeBuffer<cpa::create_ipc && cpa::has_pool>();
        if (cpa::create_ipc || cpa::attach_ipc) {
            // ... (elided)
        }
    }

    ~/* ... */() {
        allocator_.unallocate(bufferptr_);
    }
    template <typename BroadCastBuf>
    void markDeadFrom(BroadCastBuf& buffer, uint16_t poolThreadCount) {
        for (uint16_t i = poolThreadCount;
            i < BroadCastBuf::max_parallel_consumer;
            ++i) {
            // ... mark consumer slot i dead (elided)
        }
    }

    Allocator allocator_;
    Buffer* HMBDC_RESTRICT bufferptr_;
    Buffer& HMBDC_RESTRICT buffer_;

    template <bool doIt>
    typename enable_if<doIt, void>::type
    primeBuffer() {
        markDeadFrom(buffer_, 0);
    }

    template <bool doIt>
    typename enable_if<!doIt, void>::type
    primeBuffer() {
    }
    template <typename M, typename... Messages>
    void sendRecursive(typename Buffer::iterator it
        , M&& msg, Messages&&... msgs) {
        using Message = typename std::remove_reference<M>::type;
        static_assert(MAX_MESSAGE_SIZE == 0
            || sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0
            && sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        // ... construct MessageWrap<Message>(std::forward<M>(msg)) in *it (elided)
        sendRecursive(++it, std::forward<Messages>(msgs)...);
    }
    void sendRecursive(typename Buffer::iterator) {}

    size_t maxMessageSizeRuntime_;
};

// ...

} // namespace context_detail
/// A Context is like a media object that facilitates the communications
/// for the Clients that it is holding ...
template <size_t MaxMessageSize = 0, typename... ContextProperties>
struct Context
: /* ... */ {               // base facade; Base, cpa and Pool typedefs elided
    using Buffer = typename Base::Buffer;
    // ...

    // ctor for a local (non-ipc) Context
    Context(uint32_t messageQueueSizePower2Num = MaxMessageSize ? 20 : 2
        , size_t maxPoolClientCount = MaxMessageSize ? 128 : 0
        , size_t maxMessageSizeRuntime = MaxMessageSize)
    : Base(messageQueueSizePower2Num < 2 ? 2 : messageQueueSizePower2Num
        , maxMessageSizeRuntime, nullptr)
    , startToBeContinued_(true)
    , usedHmbdcCapacity_(0)
    , currentThreadSerialNumber_(0)
    // ...
    , pool_(createPool(this->buffer_, maxPoolClientCount))
    , poolThreadCount_(0) {
        static_assert(!cpa::create_ipc && !cpa::attach_ipc
            , "no name specified for ipc Context");
    }
    // ctor for an ipc Context
    Context(char const* ipcTransportName
        , uint32_t messageQueueSizePower2Num = MaxMessageSize ? 20 : 0
        , size_t maxPoolClientCount = MaxMessageSize ? 128 : 0
        , size_t maxMessageSizeRuntime = MaxMessageSize
        , uint64_t purgerCpuAffinityMask = 0xfffffffffffffffful)
    : Base(messageQueueSizePower2Num, maxMessageSizeRuntime, ipcTransportName)
    , startToBeContinued_(true)
    , usedHmbdcCapacity_(0)
    , currentThreadSerialNumber_(0)
    // ...
    , pool_(createPool(this->buffer_, maxPoolClientCount))
    , poolThreadCount_(0)
    , secondsBetweenPurge_(60)
    , purgerCpuAffinityMask_(purgerCpuAffinityMask) {
        static_assert(cpa::create_ipc || cpa::attach_ipc
            , "ctor can only be used with ipc turned on Context");
        static_assert(!(cpa::create_ipc && cpa::attach_ipc)
            , "Context cannot be both ipc_creator and ipc_attacher");
        // ...
    }

    ~Context() {
        // ...
        if (cpa::create_ipc) {
            this->markDeadFrom(this->buffer_, 0);
        }
        // ...
    }
    template <typename Client>
    void addToPool(Client& client
        , uint64_t poolThreadAffinityIn = 0xfffffffffffffffful) {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        if (std::is_base_of<single_thread_powered_client, Client>::value
            && hmbdc::numeric::setBitsCount(poolThreadAffinityIn) != 1
            && poolThreadCount_ != 1) {
            HMBDC_THROW(std::out_of_range
                , "cannot add a single thread powered client to the non-single"
                  "thread powered pool without specifying a single thread poolThreadAffinity");
        }
        pool_->addConsumer(client, poolThreadAffinityIn);
    }
    template <typename Client, typename... Args>
    void addToPool(Client& client
        , uint64_t poolThreadAffinityIn, Args&&... args) {
        addToPool(client, poolThreadAffinityIn);
        addToPool(std::forward<Args>(args)...);
    }

    size_t clientCountInPool() const {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        return pool_->consumerSize();
    }

    size_t parallelConsumerAlive() const {
        return this->buffer_.parallelConsumerAlive();
    }
    template <typename... Args>
    void start(uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask
        , Args&&... args) {
        startWithContextProperty<cpa>(poolThreadCount, poolThreadsCpuAffinityMask
            , std::forward<Args>(args)...);
    }

    template <typename Client, typename... Args>
    typename std::enable_if<!std::is_integral<Client>::value, void>::type
    start(Client& c, uint64_t cpuAffinity, Args&&... args) {
        startWithContextProperty<cpa>(c, cpuAffinity, std::forward<Args>(args)...);
    }

    void start() {
        startWithContextProperty<cpa>();
    }

    void stop() {
        stopWithContextProperty<cpa>();
    }

    void join() {
        joinWithContextProperty<cpa>();
    }

    void setSecondsBetweenPurge(uint32_t s) {
        secondsBetweenPurge_ = s;
    }

    void runPoolThreadOnce(uint16_t threadSerialNumber) {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        pool_->runOnce(threadSerialNumber);
    }
    template <typename Client>
    void runClientThreadOnce(uint16_t threadSerialNumber, Client& c) {
        c.messageDispatchingStarted(threadSerialNumber);
        context_detail::runOnceImpl(threadSerialNumber, stopped_
            , this->buffer_, c);
    }

    template <typename Buffer>
    typename Pool::ptr
    createPool(Buffer& buffer, size_t maxPoolClientCount) {
        return Pool::create(buffer, maxPoolClientCount);
    }

    template </* ... */>
    typename Pool::ptr
    createPool(/* ... */) {
        return typename Pool::ptr();
    }
    template <typename cpa>
    typename std::enable_if<cpa::has_pool, void>::type
    stopWithContextProperty() {
        if (pool_) pool_->stop();
        __sync_synchronize();
        stopped_ = true;
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool, void>::type
    stopWithContextProperty() {
        __sync_synchronize();
        stopped_ = true;
    }

    template <typename cpa>
    typename std::enable_if<cpa::has_pool, void>::type
    joinWithContextProperty() {
        if (pool_) pool_->join();
        for (auto& t : threads_) {
            t.join();
        }
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool, void>::type
    joinWithContextProperty() {
        for (auto& t : threads_) {
            t.join();
        }
    }
    template <typename cpa, typename... Args>
    typename std::enable_if<!cpa::attach_ipc && !cpa::create_ipc, void>::type
    startWithContextProperty(uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask
        , Args&&... args) {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        poolThreadCount_ = poolThreadCount;
        if (!startToBeContinued_) {
            HMBDC_THROW(std::runtime_error
                , "Exception: conflicting with previously indicated start completed!");
        }
        pool_->start(poolThreadCount, poolThreadsCpuAffinityMask, false);
        usedHmbdcCapacity_ = poolThreadCount;
        currentThreadSerialNumber_ = usedHmbdcCapacity_;
        startLocalClients(std::forward<Args>(args)...);
    }

    template <typename cpa, typename Client, typename... Args>
    typename std::enable_if<!std::is_integral<Client>::value
        && !cpa::create_ipc && !cpa::attach_ipc && !cpa::has_pool, void>::type
    startWithContextProperty(Client& c, uint64_t cpuAffinity, Args&&... args) {
        if (!startToBeContinued_) {
            HMBDC_THROW(std::runtime_error
                , "Exception: conflicting with previously indicated start completed!");
        }
        startLocalClients(c, cpuAffinity, std::forward<Args>(args)...);
    }

    template <typename cpa, typename Client, typename... Args>
    typename std::enable_if<!std::is_integral<Client>::value
        && !cpa::create_ipc && !cpa::attach_ipc && cpa::has_pool, void>::type
    startWithContextProperty(Client& c, uint64_t cpuAffinity, Args&&... args) {
        if (!startToBeContinued_) {
            HMBDC_THROW(std::runtime_error
                , "Exception: conflicting with previously indicated start completed!");
        }
        startLocalClients(c, cpuAffinity, std::forward<Args>(args)...);
    }
    template <typename cpa>
    typename std::enable_if<!cpa::create_ipc && !cpa::attach_ipc, void>::type
    startWithContextProperty() {
        startLocalClients(false);
    }

    template <typename cpa>
    typename std::enable_if<(cpa::create_ipc || cpa::attach_ipc) && cpa::has_pool, void>::type
    startWithContextProperty() {
        startBroadcastIpcClients();
    }

    template <typename cpa, typename... Args>
    typename std::enable_if<(cpa::create_ipc || cpa::attach_ipc) && cpa::has_pool, void>::type
    startWithContextProperty(uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask
        , Args&&... args) {
        if (poolThreadCount_) {
            HMBDC_THROW(std::out_of_range, "Context pool already started");
        }
        auto& lock = this->allocator_.fileLock();
        std::lock_guard<decltype(lock)> g(lock);
        auto slots = this->buffer_.unusedConsumerIndexes();
        if (slots.size() < sizeof...(args) / 2) {
            HMBDC_THROW(std::out_of_range
                , "Context instance support Client count = " << slots.size());
        }
        poolThreadCount_ = poolThreadCount;
        pool_->startThruRecycling(poolThreadCount, poolThreadsCpuAffinityMask);
        currentThreadSerialNumber_ = poolThreadCount_;
        startBroadcastIpcClients(std::forward<Args>(args)...);
    }
    template <typename cpa, typename Client, typename... Args>
    typename std::enable_if<(cpa::create_ipc || cpa::attach_ipc)
        && cpa::has_pool && !std::is_integral<Client>::value, void>::type
    startWithContextProperty(Client& c, uint64_t cpuAffinity
        , Args&&... args) {
        auto& lock = this->allocator_.fileLock();
        std::lock_guard<decltype(lock)> g(lock);
        auto clientParticipateInMessaging =
            std::remove_reference<Client>::type::REGISTERED_MESSAGE_SIZE;
        if (clientParticipateInMessaging) {
            auto slots = this->buffer_.unusedConsumerIndexes();
            if (slots.size() < 1u + sizeof...(args) / 2) {
                HMBDC_THROW(std::out_of_range
                    , "Context instance support Client count = " << slots.size());
            }
        }
        startBroadcastIpcClients(c, cpuAffinity, std::forward<Args>(args)...);
    }

    template <typename cpa, typename... Args>
    typename std::enable_if<(cpa::create_ipc || cpa::attach_ipc) && !cpa::has_pool, void>::type
    startWithContextProperty(Args&&... args) {
        startPartitionIpcClients(std::forward<Args>(args)...);
    }
    void startLocalClients(bool startToBeContinuedFlag = cpa::can_start_anytime) {
        startToBeContinued_ = startToBeContinuedFlag;
        if (!startToBeContinued_) {
            this->markDeadFrom(this->buffer_, usedHmbdcCapacity_);
        }
    }

    template <typename CcClient, typename... Args>
    void startLocalClients(CcClient& c, uint64_t mask, Args&&... args) {
        if (usedHmbdcCapacity_ >= Buffer::max_parallel_consumer
            && CcClient::REGISTERED_MESSAGE_SIZE) {
            HMBDC_THROW(std::out_of_range
                , "messaging participating client number > allowed thread number of "
                << Buffer::max_parallel_consumer);
        }
        auto thrd =
            kickOffClientThread(c, mask, usedHmbdcCapacity_, currentThreadSerialNumber_);
        threads_.push_back(move(thrd));
        auto clientParticipateInMessaging =
            std::remove_reference<CcClient>::type::REGISTERED_MESSAGE_SIZE;
        if (clientParticipateInMessaging) usedHmbdcCapacity_++;
        currentThreadSerialNumber_++;
        startLocalClients(std::forward<Args>(args)...);
    }
    void startBroadcastIpcClients() {
        if (cpa::create_ipc && !purger_) {
            // ... construct the StuckClientPurger (elided)
            startBroadcastIpcClients(*purger_, purgerCpuAffinityMask_);
        }
    }

    template <typename Client, typename... Args>
    void startBroadcastIpcClients(Client& c, uint64_t mask, Args&&... args) {
        auto clientParticipateInMessaging =
            std::remove_reference<Client>::type::REGISTERED_MESSAGE_SIZE;
        uint16_t hmbdcNumber = 0xffffu;
        if (clientParticipateInMessaging) {
            auto slots = this->buffer_.unusedConsumerIndexes();
            auto it = slots.begin();
            // ... take an unused consumer index into hmbdcNumber (elided)
            this->buffer_.reset(hmbdcNumber);
        }
        auto thrd = kickOffClientThread(
            c, mask, hmbdcNumber, currentThreadSerialNumber_);
        threads_.push_back(move(thrd));
        currentThreadSerialNumber_++;
        startBroadcastIpcClients(std::forward<Args>(args)...);
    }
    void startPartitionIpcClients() {}

    template <typename Client, typename... Args>
    void startPartitionIpcClients(Client& c, uint64_t mask, Args&&... args) {
        auto thrd = kickOffClientThread(
            c, mask, currentThreadSerialNumber_, currentThreadSerialNumber_);
        threads_.push_back(move(thrd));
        currentThreadSerialNumber_++;
        startPartitionIpcClients(std::forward<Args>(args)...);
    }
    template <typename Client>
    auto kickOffClientThread(
        Client& c, uint64_t mask, uint16_t hmbdcNumber, uint16_t threadSerialNumber) {
        std::thread thrd([/* captures elided */]() {
            auto hmbdcNumber = h;
            std::string name;
            char const* schedule;
            int priority;
            auto clientParticipateInMessaging =
                std::remove_reference<Client>::type::REGISTERED_MESSAGE_SIZE;
            // ...
            if (clientParticipateInMessaging) {
                name = "hmbdc" + std::to_string(hmbdcNumber);
            } else {
                // ... use the Client's hmbdcName() (elided)
            }
            auto cpuAffinityMask = mask;
            std::tie(schedule, priority) = c.schedSpec();
            if (!schedule) schedule = "SCHED_OTHER";
            if (!cpuAffinityMask) {
                auto cpuCount = std::thread::hardware_concurrency();
                cpuAffinityMask = 1ul << (threadSerialNumber % cpuCount);
            }
            hmbdc::os::configureCurrentThread(name.c_str(), cpuAffinityMask
                , schedule, priority);
            hmbdcNumber = clientParticipateInMessaging ? hmbdcNumber : 0xffffu;
            // ...
            while (/* ... */
                context_detail::runOnceImpl(hmbdcNumber, this->stopped_, this->buffer_, c)) {
            }
            if (this->stopped_) c.dropped();
            if (clientParticipateInMessaging) context_detail::unblock(this->buffer_, hmbdcNumber);
        });
        return thrd;
    }
    bool startToBeContinued_;
    uint16_t usedHmbdcCapacity_;
    uint16_t currentThreadSerialNumber_;
    // ...
    typename Pool::ptr pool_;
    using Threads = std::vector<std::thread>;
    Threads threads_;
    size_t poolThreadCount_;
    uint32_t secondsBetweenPurge_;
    uint64_t purgerCpuAffinityMask_;
    typename std::conditional<cpa::has_pool
        , std::unique_ptr<utils::StuckClientPurger<Buffer>>, uint32_t
    >::type purger_;
};

}} // namespace hmbdc::app
Doxygen annotations for the members and types referenced above:

size_t parallelConsumerAlive() const
how many parallel consumers are started
Definition: Context.hpp:554

void runClientThreadOnce(uint16_t threadSerialNumber, Client &c)
normally not used unless you want to run your own message loop
Definition: Context.hpp:680
void stop()
stop the message dispatching - asynchronously
Definition: Context.hpp:630

covers the inter-thread and ipc communication facade
Definition: Context.hpp:126

void join()
wait until all threads (Pool threads too, if applicable) of the Context exit
Definition: Context.hpp:639

Context template parameter indicating each message is sent to one and only one of the clients within ...
Definition: Context.hpp:76
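For orientation, the lifecycle calls above combine into the usual shutdown sequence. The snippet below is an illustrative sketch only; ctx is assumed to be a started Context and worker a direct-mode Client:

    ctx.stop();   // asynchronously tell message dispatching to stop
    ctx.join();   // wait for pool threads and direct Client threads to exit

    // Alternatively, instead of ctx.start()-ing a Client, drive it from your own loop:
    // while (running) ctx.runClientThreadOnce(threadSerialNumber, worker);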
enable_if<!std::is_integral< M1 >::value, bool >::type trySend(M0 &&m0, M1 &&m1, Messages &&...msgs)
try to send a batch of messages to the Context or attached ipc Contexts
Definition: Context.hpp:171

std::enable_if<!std::is_integral< Client >::value, void >::type start(Client &c, uint64_t cpuAffinity, Args &&...args)
start the Context (without its Pool) and direct Clients
Definition: Context.hpp:613

the default vanilla allocator
Definition: Allocators.hpp:116

void send(Message &&m)
send a message to the Context or attached ipc Contexts
Definition: Context.hpp:238
void start(uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask, Args &&...args)
start the Context and specify its Pool and direct Clients
Definition: Context.hpp:585

Context(uint32_t messageQueueSizePower2Num=MaxMessageSize?20:2, size_t maxPoolClientCount=MaxMessageSize?128:0, size_t maxMessageSizeRuntime=MaxMessageSize)
ctor for constructing a local non-ipc Context
Definition: Context.hpp:422

void sendInPlace(Args &&...args)
construct a message in place and send it to all Clients in the Context or attached ipc Contexts
Definition: Context.hpp:278

bool trySend(ForwardIt begin, size_t n)
try to send a range of messages to the Context or attached ipc Contexts
Definition: Context.hpp:215
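Put together, a minimal local (non-ipc) setup could look like the following sketch. MyClient and Tick are hypothetical types (a Client subclass and a registered message) assumed to be defined elsewhere:

    hmbdc::app::Context<> ctx(20, 0, sizeof(Tick));  // runtime-sized slots, no pool Clients
    MyClient worker;
    ctx.start(worker, 0x1ul);   // run worker as a direct Client, allowed on CPU 0
    ctx.start();                // tell the Context there are no more direct Clients to start

    Tick t0{/* ... */}, t1{/* ... */};
    ctx.send(t0, t1);           // batch send: both messages are claimed and committed together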
std::tuple< char const *, int > schedSpec() const
an overridable method; returns the schedule policy and priority, override if necessary. priority is o...
Definition: Client.hpp:126

char const * hmbdcName() const
return the name of the thread that runs this Client, override if necessary
Definition: Client.hpp:116

void send(ForwardIt begin, size_t n)
send a range of messages to the Context or attached ipc Contexts
Definition: Context.hpp:193

enable_if<!std::is_integral< M1 >::value, void >::type send(M0 &&m0, M1 &&m1, Messages &&...msgs)
send a batch of messages to the Context or attached ipc Contexts
Definition: Context.hpp:152

void messageDispatchingStartedCb(uint16_t threadSerialNumber) override
called before any messages get dispatched - only once
Definition: Client.hpp:70

Context template parameter indicating each message is sent to all clients within the Context ...
Definition: Context.hpp:53

void addToPool(Client &client, uint64_t poolThreadAffinityIn=0xfffffffffffffffful)
add a Client to the Context's pool - the Client is run in pool mode
Definition: Context.hpp:501
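The Client-side hooks above (hmbdcName, schedSpec, messageDispatchingStartedCb) are overridden on the Client subclass itself. The sketch below is illustrative only; the exact base template from Client.hpp is not shown in this listing, and MyTask, its subscribed message list, and the schedule values are assumptions (ctx is a Context constructed with a non-zero maxPoolClientCount):

    struct MyTask
    : hmbdc::app::Client<MyTask /*, subscribed message types */> {
        char const* hmbdcName() const { return "my-task"; }
        std::tuple<char const*, int> schedSpec() const { return {"SCHED_FIFO", 30}; }
        void messageDispatchingStartedCb(uint16_t threadSerialNumber) override { /* ... */ }
        // ... message handling callbacks per Client.hpp ...
    };

    MyTask task;
    ctx.addToPool(task);                          // run in the Context's pool, any pool thread
    // ctx.addToPool(taskA, maskA, taskB, maskB); // or add several Clients at once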
A Context is like a media object that facilitates the communications for the Clients that it is holding ...
Definition: Context.hpp:408

void runPoolThreadOnce(uint16_t threadSerialNumber)
normally not used unless you want to run your own message loop
Definition: Context.hpp:667

~Context()
dtor
Definition: Context.hpp:477
void start()
tell hmbdc that there are no more direct mode Clients to start
Definition: Context.hpp:620

Context(char const *ipcTransportName, uint32_t messageQueueSizePower2Num=MaxMessageSize?20:0, size_t maxPoolClientCount=MaxMessageSize?128:0, size_t maxMessageSizeRuntime=MaxMessageSize, uint64_t purgerCpuAffinityMask=0xfffffffffffffffful)
ctor for constructing an ipc Context
Definition: Context.hpp:452
Buffer & buffer()
accessor - mostly used internally
Definition: Context.hpp:291
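For the ipc flavors, one process constructs the shared transport with an ipc_creator-configured Context and other processes attach to it by name with ipc_attacher-configured ones, both using the ipc ctor shown above. The sketch is illustrative only; the exact property template arguments are not visible in this listing, and "md-bus", Tick and worker are hypothetical:

    // process A - creates the shared transport "md-bus"
    hmbdc::app::Context<0, hmbdc::app::context_property::ipc_creator /* ... */> creator(
        "md-bus", 20, 128, sizeof(Tick));

    // process B - attaches to the existing transport by the same name
    hmbdc::app::Context<0, hmbdc::app::context_property::ipc_attacher /* ... */> attacher(
        "md-bus", 20, 128, sizeof(Tick));
    attacher.start(worker, 0x4ul);   // worker now receives messages sent from process A
    attacher.start();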
Context template parameter indicating the Context is ipc enabled and it can be attached (see ipc_attacher) ...
Definition: Context.hpp:91

void addToPool(Client &client, uint64_t poolThreadAffinityIn, Args &&...args)
add a bunch of Clients to the Context's pool - the Clients are run in pool mode
Definition: Context.hpp:532

size_t clientCountInPool() const
return the number of Clients added into the pool
Definition: Context.hpp:543

A Client represents a thread of execution/a task. The execution is managed by a Context. A Client object could participate in message dispatching as the receiver of specified message types.
Definition: Client.hpp:47

void setSecondsBetweenPurge(uint32_t s)
an ipc_creator Context runs a StuckClientPurger to purge crashed (or slow, stuck ...) Clients from the ipc ...
Definition: Context.hpp:655

bool trySend(Message &&m)
try to send a message to the Context or attached ipc Contexts
Definition: Context.hpp:258

Context template parameter indicating the Context is ipc enabled and it can attach to an ipc transport ...
Definition: Context.hpp:104
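Finally, an ipc_creator Context that owns a pool can tune the purger interval before starting its pool threads. A sketch with hypothetical values (creator as in the previous sketch):

    creator.setSecondsBetweenPurge(10);  // purge stuck or crashed attached Clients every 10s (default is 60)
    creator.start(2, 0x3ul);             // start 2 pool threads, allowed on CPU 0 and CPU 1
    // creator.clientCountInPool() reports how many Clients have been added via addToPool()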