1 #include "hmbdc/Copyright.hpp" 5 #include "hmbdc/app/StuckClientPurger.hpp" 6 #include "hmbdc/Config.hpp" 7 #include "hmbdc/numeric/BitMath.hpp" 13 namespace hmbdc {
namespace app {
27 namespace context_property {
54 template <u
int16_t max_parallel_consumer = DEFAULT_HMBDC_CAPACITY>
56 static_assert(max_parallel_consumer >= 4u
118 #include "hmbdc/app/ContextDetail.hpp" 119 namespace hmbdc {
namespace app {
121 namespace context_detail {
135 template <
size_t MaxMessageSize,
typename... ContextProperties>
143 MAX_MESSAGE_SIZE = MaxMessageSize,
144 BUFFER_VALUE_SIZE = MaxMessageSize + 8u,
147 size_t maxMessageSize()
const {
148 if (MaxMessageSize == 0)
return maxMessageSizeRuntime_;
149 return MaxMessageSize;
160 template <
typename M0,
typename M1,
typename ... Messages>
161 typename std::enable_if<!std::is_integral<M1>::value,
void>::type
162 send(M0&& m0, M1&& m1, Messages&&... msgs) {
163 auto n =
sizeof...(msgs) + 2;
164 auto it = buffer_.claim(n);
165 sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
166 buffer_.commit(it, n);
179 template <
typename M0,
typename M1,
typename ... Messages>
180 typename std::enable_if<!std::is_integral<M1>::value,
bool>::type
181 trySend(M0&& m0, M1&& m1, Messages&&... msgs) {
182 auto n =
sizeof...(msgs) + 2;
183 auto it = buffer_.tryClaim(n);
185 sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
186 buffer_.commit(it, n);
201 template <
typename ForwardIt>
203 send(ForwardIt begin,
size_t n) {
204 if (hmbdc_likely(n)) {
205 auto bit = buffer_.claim(n);
207 for (
auto i = 0ul; i < n; i++) {
208 using Message =
typename iterator_traits<ForwardIt>::value_type;
211 buffer_.commit(bit, n);
223 template <
typename ForwardIt>
226 if (hmbdc_likely(n)) {
227 auto bit = buffer_.tryClaim(n);
228 if (hmbdc_unlikely(!bit))
return false;
230 for (
auto i = 0ul; i < n; i++) {
231 using Message =
typename iterator_traits<ForwardIt>::value_type;
234 buffer_.commit(bit, n);
247 template <
typename Message>
249 using M =
typename std::remove_reference<Message>::type;
250 static_assert(MAX_MESSAGE_SIZE == 0 ||
sizeof(
MessageWrap<M>) <= BUFFER_VALUE_SIZE
251 ,
"message too big");
252 if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
sizeof(
MessageWrap<M>) > buffer_.maxItemSize())) {
253 HMBDC_THROW(std::out_of_range,
"message too big");
267 template <
typename Message>
269 using M =
typename std::remove_reference<Message>::type;
270 static_assert(MAX_MESSAGE_SIZE == 0 ||
sizeof(
MessageWrap<M>) <= BUFFER_VALUE_SIZE
271 ,
"message too big");
272 if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
sizeof(
MessageWrap<M>) > buffer_.maxItemSize())) {
273 HMBDC_THROW(std::out_of_range,
"message too big");
287 template <
typename Message,
typename ... Args>
290 ,
"message too big");
291 if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
sizeof(
MessageWrap<Message>) > buffer_.maxItemSize())) {
292 HMBDC_THROW(std::out_of_range,
"message too big");
294 buffer_.template putInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
309 template <
typename Message,
typename ... Args>
312 ,
"message too big");
313 if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
sizeof(
MessageWrap<Message>) > buffer_.maxItemSize())) {
314 HMBDC_THROW(std::out_of_range,
"message too big");
316 return buffer_.template tryPutInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
330 ,
size_t maxMessageSizeRuntime
331 ,
char const* shmName)
333 , Buffer::footprint(maxMessageSizeRuntime + 8u
334 , messageQueueSizePower2Num), O_RDWR | (cpa::create_ipc?O_CREAT:0)
336 , bufferptr_(allocator_.template allocate<Buffer>(SMP_CACHE_BYTES
337 , maxMessageSizeRuntime + 8u, messageQueueSizePower2Num
340 , buffer_(*bufferptr_) {
341 if (messageQueueSizePower2Num < 2) {
342 HMBDC_THROW(std::out_of_range
343 ,
"messageQueueSizePower2Num need >= 2");
345 if (MaxMessageSize && maxMessageSizeRuntime != MAX_MESSAGE_SIZE) {
346 HMBDC_THROW(std::out_of_range
347 ,
"can only set maxMessageSizeRuntime when template value MaxMessageSize is 0");
349 maxMessageSizeRuntime_ = maxMessageSizeRuntime;
350 primeBuffer<(cpa::create_ipc || (!cpa::create_ipc && !cpa::attach_ipc)) && cpa::has_pool>();
351 if (cpa::create_ipc || cpa::attach_ipc) {
357 allocator_.unallocate(bufferptr_);
365 template <
typename BroadCastBuf>
367 void markDeadFrom(BroadCastBuf& buffer, uint16_t poolThreadCount) {
368 for (uint16_t i = poolThreadCount;
369 i < BroadCastBuf::max_parallel_consumer;
381 template <
typename BroadCastBuf>
383 void markDead(BroadCastBuf& buffer, std::list<uint16_t>slots) {
384 for (
auto s : slots) {
389 Allocator allocator_;
390 Buffer* HMBDC_RESTRICT bufferptr_;
391 Buffer& HMBDC_RESTRICT buffer_;
395 typename std::enable_if<doIt, void>::type
397 markDeadFrom(buffer_, 0);
401 typename std::enable_if<!doIt, void>::type
405 template <
typename M,
typename... Messages>
406 void sendRecursive(
typename Buffer::iterator it
407 , M&& msg, Messages&&... msgs) {
408 using Message =
typename std::remove_reference<M>::type;
410 ,
"message too big");
411 if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
sizeof(
MessageWrap<Message>) > buffer_.maxItemSize())) {
412 HMBDC_THROW(std::out_of_range,
"message too big");
415 sendRecursive(++it, std::forward<M>(msgs)...);
417 void sendRecursive(
typename Buffer::iterator) {}
419 size_t maxMessageSizeRuntime_;
454 template <
size_t MaxMessageSize = 0,
typename... ContextProperties>
458 using Buffer =
typename Base::Buffer;
460 using Pool =
typename std::conditional<cpa::pool_msgless
473 Context(uint32_t messageQueueSizePower2Num = MaxMessageSize?20:2
474 ,
size_t maxPoolClientCount = MaxMessageSize?128:0
475 ,
size_t maxMessageSizeRuntime = MaxMessageSize
476 ,
size_t maxThreadSerialNumber = 64)
477 :
Base(messageQueueSizePower2Num < 2?2:messageQueueSizePower2Num
478 , maxMessageSizeRuntime, nullptr)
479 , usedHmbdcCapacity_(0)
481 , pool_(createPool<
cpa>(maxPoolClientCount))
482 , poolThreadCount_(0) {
483 static_assert(!cpa::create_ipc && !cpa::attach_ipc
484 ,
"no name specified for ipc Context");
504 , uint32_t messageQueueSizePower2Num = MaxMessageSize?20:0
505 ,
size_t maxPoolClientCount = MaxMessageSize?128:0
506 ,
size_t maxMessageSizeRuntime = MaxMessageSize
507 , uint64_t purgerCpuAffinityMask = 0xfffffffffffffffful
508 ,
size_t maxThreadSerialNumber = 64)
509 :
Base(messageQueueSizePower2Num, maxMessageSizeRuntime, ipcTransportName)
510 , usedHmbdcCapacity_(0)
512 , pool_(createPool<
cpa>(maxPoolClientCount))
513 , poolThreadCount_(0)
514 , secondsBetweenPurge_(60)
515 , purgerCpuAffinityMask_(purgerCpuAffinityMask) {
516 static_assert(cpa::create_ipc || cpa::attach_ipc
517 ,
"ctor can only be used with ipc turned on Context");
518 static_assert(!(cpa::create_ipc && cpa::attach_ipc)
519 ,
"Context cannot be both ipc_creator and ipc_attacher");
528 if (cpa::create_ipc) {
529 Base::markDeadFrom(this->buffer_, 0);
550 template <
typename Client>
552 , uint64_t poolThreadAffinityIn = 0xfffffffffffffffful) {
553 static_assert(cpa::has_pool,
"pool is not support in the Context type");
554 if (std::is_base_of<single_thread_powered_client, Client>::value
555 && hmbdc::numeric::setBitsCount(poolThreadAffinityIn) != 1
556 && poolThreadCount_ != 1) {
557 HMBDC_THROW(std::out_of_range
558 ,
"cannot add a single thread powered client to the non-single" 559 "thread powered pool without specifying a single thread poolThreadAffinity" 563 pool_->addConsumer(*stub, poolThreadAffinityIn);
582 template <
typename Client,
typename ... Args>
584 , uint64_t poolThreadAffinityIn, Args&& ...args) {
585 addToPool(client, poolThreadAffinityIn);
586 addToPool(std::forward<Args>(args)...);
601 template <
typename Client,
typename Client2,
typename ... Args>
602 typename std::enable_if<!std::is_integral<Client2>::value,
void>::type
605 addToPool(client2, std::forward<Args>(args)...);
614 static_assert(cpa::has_pool,
"pool is not support in the Context type");
615 return pool_->consumerSize();
625 return this->buffer_.parallelConsumerAlive();
654 template <
typename ...Args>
657 startWithContextProperty<cpa>(std::forward<Args>(args) ...);
667 stopWithContextProperty<cpa>();
676 joinWithContextProperty<cpa>();
692 secondsBetweenPurge_ = s;
704 static_assert(cpa::has_pool,
"pool is not support in the Context type");
705 pool_->runOnce(threadSerialNumberInPool);
715 template <
typename Client>
717 c.messageDispatchingStarted(
718 hmbdcNumbers_[threadSerialNumber]);
719 context_detail::runOnceImpl(
720 hmbdcNumbers_[threadSerialNumber], stopped_, this->buffer_, c);
724 template <
typename cpa>
725 typename std::enable_if<cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
726 createPool(
size_t maxPoolClientCount) {
727 return Pool::create(this->buffer(), maxPoolClientCount);
730 template <
typename cpa>
731 typename std::enable_if<cpa::pool_msgless, typename Pool::ptr>::type
732 createPool(
size_t maxPoolClientCount) {
733 return Pool::create(maxPoolClientCount);
736 template <
typename cpa>
737 typename std::enable_if<!cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
739 return typename Pool::ptr();
742 template <
typename cpa>
743 typename std::enable_if<cpa::has_pool, void>::type
744 stopWithContextProperty() {
745 if (pool_) pool_->stop();
746 __atomic_thread_fence(__ATOMIC_ACQUIRE);
750 template <
typename cpa>
751 typename std::enable_if<!cpa::has_pool, void>::type
752 stopWithContextProperty() {
753 __atomic_thread_fence(__ATOMIC_ACQUIRE);
757 template <
typename cpa>
758 typename std::enable_if<cpa::has_pool, void>::type
759 joinWithContextProperty() {
760 if (pool_) pool_->join();
761 for (
auto& t : threads_) {
767 template <
typename cpa>
768 typename std::enable_if<!cpa::has_pool, void>::type
769 joinWithContextProperty() {
770 for (
auto& t : threads_) {
776 template <
typename cpa>
778 reserveSlots(std::list<uint16_t>&) {
781 template <
typename cpa,
typename ...Args>
782 typename std::enable_if<cpa::broadcast_msg && !cpa::pool_msgless, void>::type
783 reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&& ... args) {
784 auto available = this->buffer_.unusedConsumerIndexes();
785 if (available.size() < poolThreadCount) {
786 HMBDC_THROW(std::out_of_range
787 ,
"Context remaining capacilty = " << available.size()
788 <<
", consider increasing max_parallel_consumer");
790 for (uint16_t i = 0; i < poolThreadCount; ++i) {
791 slots.push_back(available[i]);
792 this->buffer_.reset(available[i]);
794 reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
797 template <
typename cpa,
typename ...Args>
798 typename std::enable_if<!cpa::broadcast_msg || cpa::pool_msgless, void>::type
799 reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&& ... args) {
800 reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
803 template <
typename cpa,
typename CcClient,
typename ...Args>
804 typename std::enable_if<cpa::broadcast_msg && !std::is_integral<CcClient>::value,
void>::type
805 reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&& ... args) {
806 const bool clientParticipateInMessaging =
807 std::remove_reference<CcClient>::type::REGISTERED_MESSAGE_SIZE != 0;
808 if (clientParticipateInMessaging) {
809 auto available = this->buffer_.unusedConsumerIndexes();
810 if (!available.size()) {
811 HMBDC_THROW(std::out_of_range
812 ,
"Context reached capacity, consider increasing max_parallel_consumer");
814 this->buffer_.reset(available[0]);
815 slots.push_back(available[0]);
817 reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
820 template <
typename cpa,
typename CcClient,
typename ...Args>
821 typename std::enable_if<!cpa::broadcast_msg && !std::is_integral<CcClient>::value,
void>::type
822 reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&& ... args) {
825 template <
typename cpa,
typename ...Args>
826 typename std::enable_if<cpa::create_ipc || cpa::attach_ipc, void>::type
827 startWithContextProperty(Args&& ... args) {
828 auto& lock = this->allocator_.fileLock();
829 std::lock_guard<decltype(lock)> g(lock);
830 std::list<uint16_t> slots;
832 reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
834 startWithContextPropertyImpl<cpa>(sc, std::forward<Args>(args) ...);
835 }
catch (std::out_of_range
const&) {
836 Base::markDead(this->buffer_, slots);
841 template <
typename cpa,
typename ...Args>
842 typename std::enable_if<!cpa::create_ipc && !cpa::attach_ipc, void>::type
843 startWithContextProperty(Args&& ... args) {
844 std::list<uint16_t> slots;
846 reserveSlots<cpa>(slots, std::forward<Args>(args) ...);
848 startWithContextPropertyImpl<cpa>(sc, std::forward<Args>(args) ...);
849 }
catch (std::out_of_range
const&) {
850 Base::markDead(this->buffer_, slots);
855 template <
typename cpa>
856 typename std::enable_if<cpa::broadcast_msg && cpa::create_ipc, void>::type
857 startWithContextPropertyImpl(std::list<uint16_t>& slots) {
861 startWithContextPropertyImpl<cpa>(slots, *purger_, purgerCpuAffinityMask_);
865 template <
typename cpa>
866 typename std::enable_if<!cpa::broadcast_msg || !cpa::create_ipc, void>::type
867 startWithContextPropertyImpl(std::list<uint16_t>& slots) {
870 template <
typename cpa,
typename ...Args>
871 typename std::enable_if<cpa::has_pool, void>::type
872 startWithContextPropertyImpl(std::list<uint16_t>& slots
873 , uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask
876 if (poolThreadCount_) {
877 HMBDC_THROW(std::out_of_range,
"Context pool already started");
879 std::vector<uint16_t> sc(slots.begin(), slots.end());
880 if (!poolThreadsCpuAffinityMask) {
881 auto cpuCount = std::thread::hardware_concurrency();
882 poolThreadsCpuAffinityMask =
883 ((1ul << poolThreadCount) - 1u) << (hmbdcNumbers_.size() % cpuCount);
886 pool_->startAt(poolThreadCount, poolThreadsCpuAffinityMask, sc);
887 while(poolThreadCount--) {
888 if (!cpa::pool_msgless) {
889 hmbdcNumbers_.push_back(*slots.begin());
893 poolThreadCount_ = poolThreadCount;
894 startWithContextPropertyImpl<cpa>(slots, std::forward<Args>(args) ...);
897 template <
typename cpa,
typename Client,
typename ...Args>
898 typename std::enable_if<!std::is_integral<Client>::value,
void>::type
899 startWithContextPropertyImpl(std::list<uint16_t>& slots
900 , Client& c, uint64_t cpuAffinity
902 auto clientParticipateInMessaging =
903 std::remove_reference<Client>::type::REGISTERED_MESSAGE_SIZE;
904 uint16_t hmbdcNumber = 0xffffu;
905 if (clientParticipateInMessaging && cpa::broadcast_msg) {
906 hmbdcNumber = *slots.begin();
909 auto thrd = kickOffClientThread(
910 c, cpuAffinity, hmbdcNumber, hmbdcNumbers_.size());
911 threads_.push_back(move(thrd));
912 hmbdcNumbers_.push_back(hmbdcNumber);
913 startWithContextPropertyImpl<cpa>(slots, std::forward<Args>(args) ...);
916 template <
typename Client>
917 auto kickOffClientThread(
918 Client& c, uint64_t mask, uint16_t hmbdcNumber, uint16_t threadSerialNumber) {
926 auto hmbdcNumber = h;
928 char const* schedule;
930 auto clientParticipateInMessaging =
931 std::remove_reference<Client>::type::REGISTERED_MESSAGE_SIZE;
937 if (clientParticipateInMessaging) {
938 name =
"hmbdc" + std::to_string(hmbdcNumber);
943 auto cpuAffinityMask = mask;
944 std::tie(schedule, priority) = c.
schedSpec();
946 if (!schedule) schedule =
"SCHED_OTHER";
949 auto cpuCount = std::thread::hardware_concurrency();
950 cpuAffinityMask = 1ul << (threadSerialNumber % cpuCount);
953 hmbdc::os::configureCurrentThread(name.c_str(), cpuAffinityMask
954 , schedule, priority);
956 hmbdcNumber = clientParticipateInMessaging?hmbdcNumber:0xffffu;
960 context_detail::runOnceImpl(hmbdcNumber, this->stopped_, this->buffer_, c)) {
962 if (this->stopped_) c.dropped();
963 if (clientParticipateInMessaging) context_detail::unblock(this->buffer_, hmbdcNumber);
972 uint16_t usedHmbdcCapacity_;
973 std::vector<uint16_t> hmbdcNumbers_;
976 typename Pool::ptr pool_;
977 using Threads = std::vector<std::thread>;
979 size_t poolThreadCount_;
980 uint32_t secondsBetweenPurge_;
981 uint64_t purgerCpuAffinityMask_;
982 typename std::conditional<cpa::broadcast_msg && cpa::create_ipc
983 , std::unique_ptr<StuckClientPurger<Buffer>>, uint32_t
size_t parallelConsumerAlive() const
how many parallel consumers are started
Definition: Context.hpp:624
void runClientThreadOnce(uint16_t threadSerialNumber, Client &c)
normally not used until you want to run your own message loop
Definition: Context.hpp:716
Definition: MonoLockFreeBuffer.hpp:15
Definition: ContextDetail.hpp:60
Definition: StuckClientPurger.hpp:11
Context template parameter indicating the Context must contain a pool to run Clients and the Clients ...
Definition: Context.hpp:86
void stop()
stop the message dispatching - asynchronously
Definition: Context.hpp:666
covers the inter-thread and ipc communication facade
Definition: Context.hpp:136
void join()
wait until all threads (Pool threads too if apply) of the Context exit
Definition: Context.hpp:675
Context template parameter indicating each message is sent to one and only one of the clients within ...
Definition: Context.hpp:75
std::enable_if<!std::is_integral< Client2 >::value, void >::type addToPool(Client &client, Client2 &client2, Args &&...args)
add a bunch of clients to Context's pool - the Clients are run in pool mode
Definition: Context.hpp:603
Definition: TypedString.hpp:74
Definition: PoolMinus.hpp:9
the default vanilla allocate
Definition: Allocators.hpp:116
void send(Message &&m)
send a message to the Context or attached ipc Contexts
Definition: Context.hpp:248
Definition: BlockingBuffer.hpp:11
void runPoolThreadOnce(uint16_t threadSerialNumberInPool)
normally not used until you want to run your own message loop
Definition: Context.hpp:703
std::enable_if<!std::is_integral< M1 >::value, void >::type send(M0 &&m0, M1 &&m1, Messages &&...msgs)
send a batch of messages to the Context or attached ipc Contexts
Definition: Context.hpp:162
void sendInPlace(Args &&...args)
send a message to all Clients in the Context or attached ipc Contexts
Definition: Context.hpp:288
bool trySend(ForwardIt begin, size_t n)
try send a range of messages to the Context or attached ipc Contexts
Definition: Context.hpp:225
std::tuple< char const *, int > schedSpec() const
an overrideable method. returns the schedule policy and priority, override if necessary priority is o...
Definition: Client.hpp:69
Definition: ContextDetail.hpp:28
char const * hmbdcName() const
return the name of thread that runs this client, override if necessary
Definition: Client.hpp:59
void start(Args &&...args)
start the context by specifying what are in it (Pool and/or direct Clients) and their paired up cpu a...
Definition: Context.hpp:656
void send(ForwardIt begin, size_t n)
send a range of messages to the Context or attached ipc Contexts
Definition: Context.hpp:203
Context template parameter indicating each message is sent to all clients within the Context...
Definition: Context.hpp:55
void addToPool(Client &client, uint64_t poolThreadAffinityIn=0xfffffffffffffffful)
add a client to Context's pool - the Client is run in pool mode
Definition: Context.hpp:551
Definition: LockFreeBufferT.hpp:18
Context(char const *ipcTransportName, uint32_t messageQueueSizePower2Num=MaxMessageSize?20:0, size_t maxPoolClientCount=MaxMessageSize?128:0, size_t maxMessageSizeRuntime=MaxMessageSize, uint64_t purgerCpuAffinityMask=0xfffffffffffffffful, size_t maxThreadSerialNumber=64)
ctor for construct local ipc Context
Definition: Context.hpp:503
A Context is like a media object that facilitates the communications for the Clients that it is holdi...
Definition: Context.hpp:455
~Context()
dtor
Definition: Context.hpp:527
Definition: Message.hpp:72
bool trySendInPlace(Args &&...args)
try send a message to all Clients in the Context or attached ipc Contexts if it wouldn't block ...
Definition: Context.hpp:310
virtual void messageDispatchingStartedCb(uint16_t threadSerialNumber)
called before any messages got dispatched - only once
Definition: Client.hpp:93
Definition: BitMath.hpp:9
Buffer & buffer()
accessor - mostly used internally
Definition: Context.hpp:323
Context template parameter indicating the Context is ipc enabled and it can be attached (see ipc_atta...
Definition: Context.hpp:101
void addToPool(Client &client, uint64_t poolThreadAffinityIn, Args &&...args)
add a bunch of clients to Context's pool - the Clients are run in pool mode
Definition: Context.hpp:583
size_t clientCountInPool() const
return the number of clients added into pool
Definition: Context.hpp:613
std::enable_if<!std::is_integral< M1 >::value, bool >::type trySend(M0 &&m0, M1 &&m1, Messages &&...msgs)
try to send a batch of message to the Context or attached ipc Contexts
Definition: Context.hpp:181
A Client represents a thread of execution/a task. The execution is managed by a Context. a Client object could participate in message dispatching as the receiver of specified message types.
Definition: Client.hpp:47
void setSecondsBetweenPurge(uint32_t s)
ipc_creator Context runs a StuckClientPurger to purge crashed (or slow, stuck ...) Clients from the ip...
Definition: Context.hpp:691
Context(uint32_t messageQueueSizePower2Num=MaxMessageSize?20:2, size_t maxPoolClientCount=MaxMessageSize?128:0, size_t maxMessageSizeRuntime=MaxMessageSize, size_t maxThreadSerialNumber=64)
ctor for construct local non-ipc Context
Definition: Context.hpp:473
bool trySend(Message &&m)
try to send a message to the Context or attached ipc Contexts if it wouldn't block ...
Definition: Context.hpp:268
Context template parameter indicating the Context is ipc enabled and it can attach to an ipc transpor...
Definition: Context.hpp:114