#include "hmbdc/Copyright.hpp"
#include "hmbdc/app/StuckClientPurger.hpp"
#include "hmbdc/Config.hpp"
#include "hmbdc/numeric/BitMath.hpp"

namespace hmbdc { namespace app {

namespace context_property {
/// Context template parameter indicating each message is sent to all Clients
/// within the Context (and attached ipc Contexts)
template <uint16_t max_parallel_consumer = DEFAULT_HMBDC_CAPACITY>
struct broadcast {
    static_assert(max_parallel_consumer >= 4u
        , "max_parallel_consumer too small"); // condition tail and message elided in the listing
    // (the rest of this property, and the partition / ipc_creator / ipc_attacher
    //  properties that follow it, are elided in the original listing)
};
} // namespace context_property
}} // namespace hmbdc::app
#include "hmbdc/app/ContextDetail.hpp"

namespace hmbdc { namespace app {
namespace context_detail {

/// covers the inter-thread and ipc communication facade
/// (struct name assumed from the hmbdc source; its base list is elided here)
template <size_t MaxMessageSize, typename... ContextProperties>
struct ThreadCommBase {
    enum {
        MAX_MESSAGE_SIZE = MaxMessageSize,
        BUFFER_VALUE_SIZE = MaxMessageSize + 8u, // 8 extra bytes to hold the MessageWrap header
    };

    size_t maxMessageSize() const {
        if (MaxMessageSize == 0) return maxMessageSizeRuntime_;
        return MaxMessageSize;
    }
    /// send a batch of messages to the Context or attached ipc Contexts
    template <typename M0, typename M1, typename... Messages>
    typename std::enable_if<!std::is_integral<M1>::value, void>::type
    send(M0&& m0, M1&& m1, Messages&&... msgs) {
        auto n = sizeof...(msgs) + 2;
        auto it = buffer_.claim(n);
        sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
        buffer_.commit(it, n);
    }
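    /* Usage sketch for the batched send above (MsgA/MsgB are hypothetical
       trivially-destructible message types; ctx is a Context from this header):

           MsgA a; MsgB b;
           ctx.send(a, b); // claims 2 adjacent slots, commits both at once

       All messages of one call become visible to consumers together. */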
    /// try to send a batch of messages; returns false (sending nothing)
    /// if the buffer cannot take all of them right now
    template <typename M0, typename M1, typename... Messages>
    typename std::enable_if<!std::is_integral<M1>::value, bool>::type
    trySend(M0&& m0, M1&& m1, Messages&&... msgs) {
        auto n = sizeof...(msgs) + 2;
        auto it = buffer_.tryClaim(n);
        if (hmbdc_unlikely(!it)) return false; // claim-failure check elided in the listing
        sendRecursive(it, std::forward<M0>(m0), std::forward<M1>(m1), std::forward<Messages>(msgs)...);
        buffer_.commit(it, n);
        return true;
    }
    /// send a range of messages to the Context or attached ipc Contexts
    template <typename ForwardIt>
    void send(ForwardIt begin, size_t n) {
        if (hmbdc_likely(n)) {
            auto bit = buffer_.claim(n);
            auto it = bit;
            for (auto i = 0ul; i < n; i++) {
                using Message = typename std::iterator_traits<ForwardIt>::value_type;
                static_assert(std::is_trivially_destructible<Message>::value,
                    "cannot send message with dtor");
                new (*it++) MessageWrap<Message>(*begin++); // in-slot construction; elided in the listing
            }
            buffer_.commit(bit, n);
        }
    }
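    /* Usage sketch for the range send above (Msg is a hypothetical message type):

           Msg batch[64];
           ctx.send(batch, 64); // blocks until all 64 slots are claimed

       The non-blocking trySend(begin, n) below commits nothing when it fails. */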
    /// try to send a range of messages; returns false (sending nothing)
    /// if the buffer cannot take all of them right now
    template <typename ForwardIt>
    bool trySend(ForwardIt begin, size_t n) {
        if (hmbdc_likely(n)) {
            auto bit = buffer_.tryClaim(n);
            if (hmbdc_unlikely(!bit)) return false;
            auto it = bit;
            for (auto i = 0ul; i < n; i++) {
                using Message = typename std::iterator_traits<ForwardIt>::value_type;
                static_assert(std::is_trivially_destructible<Message>::value,
                    "cannot send message with dtor");
                new (*it++) MessageWrap<Message>(*begin++); // in-slot construction; elided in the listing
            }
            buffer_.commit(bit, n);
        }
        return true;
    }
    /// send a message to the Context or attached ipc Contexts
    template <typename Message>
    void send(Message&& m) {
        using M = typename std::remove_reference<Message>::type;
        static_assert(std::is_trivially_destructible<M>::value,
            "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 ||
            sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
            sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        buffer_.put(MessageWrap<M>(std::forward<Message>(m))); // assumed put call; elided in the listing
    }
    /// try to send a message to the Context or attached ipc Contexts
    /// if it wouldn't block
    template <typename Message>
    bool trySend(Message&& m) {
        using M = typename std::remove_reference<Message>::type;
        static_assert(std::is_trivially_destructible<M>::value,
            "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 ||
            sizeof(MessageWrap<M>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
            sizeof(MessageWrap<M>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        return buffer_.tryPut(MessageWrap<M>(std::forward<Message>(m))); // assumed tryPut call; elided
    }
    /// send a message to all Clients in the Context or attached ipc Contexts,
    /// constructing it directly in the buffer from args
    template <typename Message, typename... Args>
    void sendInPlace(Args&&... args) {
        static_assert(std::is_trivially_destructible<Message>::value,
            "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 ||
            sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
            sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        buffer_.template putInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
    }
    /// try to send a message in place if it wouldn't block; returns false otherwise
    template <typename Message, typename... Args>
    bool trySendInPlace(Args&&... args) {
        static_assert(std::is_trivially_destructible<Message>::value,
            "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 ||
            sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
            sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        return buffer_.template tryPutInPlace<MessageWrap<Message>>(std::forward<Args>(args)...);
    }
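    /* The in-place variants above construct the message directly in the claimed
       slot, avoiding a temporary. Sketch (Msg and its (int, size_t) ctor are
       hypothetical):

           ctx.sendInPlace<Msg>(42, 1024u);              // Msg(42, 1024u) built in-slot
           bool ok = ctx.trySendInPlace<Msg>(42, 1024u); // non-blocking variant
    */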
    /// accessor - mostly used internally
    Buffer& buffer() { return buffer_; } // assumed trivial body; elided in the listing

    ThreadCommBase(uint32_t messageQueueSizePower2Num
        , size_t maxMessageSizeRuntime
        , char const* shmName)
    : allocator_(shmName // leading allocator args assumed; partially elided in the listing
        , Buffer::footprint(maxMessageSizeRuntime + 8u
            , messageQueueSizePower2Num), O_RDWR | (cpa::create_ipc ? O_CREAT : 0))
    , bufferptr_(allocator_.template allocate<Buffer>(SMP_CACHE_BYTES
        , maxMessageSizeRuntime + 8u, messageQueueSizePower2Num))
    , buffer_(*bufferptr_) {
        if (messageQueueSizePower2Num < 2) {
            HMBDC_THROW(std::out_of_range
                , "messageQueueSizePower2Num needs to be >= 2");
        }
        if (MaxMessageSize && maxMessageSizeRuntime != MAX_MESSAGE_SIZE) {
            HMBDC_THROW(std::out_of_range
                , "can only set maxMessageSizeRuntime when template value MaxMessageSize is 0");
        }
        maxMessageSizeRuntime_ = maxMessageSizeRuntime;
        primeBuffer<(cpa::create_ipc || (!cpa::create_ipc && !cpa::attach_ipc)) && cpa::has_pool>();
        if (cpa::create_ipc || cpa::attach_ipc) {
            // (ipc attach/creation details elided in the original listing)
        }
    }

    ~ThreadCommBase() {
        allocator_.unallocate(bufferptr_);
    }
    template <typename BroadCastBuf>
    static void markDeadFrom(BroadCastBuf& buffer, uint16_t poolThreadCount) {
        for (uint16_t i = poolThreadCount;
            i < BroadCastBuf::max_parallel_consumer; ++i) {
            buffer.markDead(i); // assumed per-slot call; loop body elided in the listing
        }
    }

    template <typename BroadCastBuf>
    static void markDead(BroadCastBuf& buffer, std::list<uint16_t> slots) {
        for (auto s : slots) {
            buffer.markDead(s); // assumed per-slot call; loop body elided in the listing
        }
    }
    Allocator allocator_;
    Buffer* HMBDC_RESTRICT bufferptr_;
    Buffer& HMBDC_RESTRICT buffer_;

    template <bool doIt>
    typename std::enable_if<doIt, void>::type
    primeBuffer() {
        markDeadFrom(buffer_, 0);
    }

    template <bool doIt>
    typename std::enable_if<!doIt, void>::type
    primeBuffer() {}
    template <typename M, typename... Messages>
    void sendRecursive(typename Buffer::iterator it
        , M&& msg, Messages&&... msgs) {
        using Message = typename std::remove_reference<M>::type;
        static_assert(std::is_trivially_destructible<Message>::value,
            "cannot send message with dtor");
        static_assert(MAX_MESSAGE_SIZE == 0 ||
            sizeof(MessageWrap<Message>) <= BUFFER_VALUE_SIZE, "message too big");
        if (hmbdc_unlikely(MAX_MESSAGE_SIZE == 0 &&
            sizeof(MessageWrap<Message>) > buffer_.maxItemSize())) {
            HMBDC_THROW(std::out_of_range, "message too big");
        }
        new (*it) MessageWrap<Message>(std::forward<M>(msg)); // in-slot construction; elided in the listing
        sendRecursive(++it, std::forward<Messages>(msgs)...);
    }
    void sendRecursive(typename Buffer::iterator) {}

    size_t maxMessageSizeRuntime_;
};

} // namespace context_detail
/// A Context is like a media object that facilitates the communications
/// for the Clients that it is holding
template <size_t MaxMessageSize = 0, typename... ContextProperties>
struct Context
    : private context_detail::ThreadCommBase<MaxMessageSize, ContextProperties...> {
    // (base list reconstructed from the Base(...) usage below)
    using Base = context_detail::ThreadCommBase<MaxMessageSize, ContextProperties...>;
    // (the cpa alias aggregating the ContextProperties is elided in the original listing)
    using Buffer = typename Base::Buffer;
    using Pool = typename std::conditional<cpa::pool_msgless
        , PoolMinus, PoolT<Buffer>>::type; // pooled alternative assumed; partially elided
    /// ctor for constructing a local (non-ipc) Context
    Context(uint32_t messageQueueSizePower2Num = MaxMessageSize ? 20 : 2
        , size_t maxPoolClientCount = MaxMessageSize ? 128 : 0
        , size_t maxMessageSizeRuntime = MaxMessageSize
        , size_t maxThreadSerialNumber = 64)
    : Base(messageQueueSizePower2Num < 2 ? 2 : messageQueueSizePower2Num
        , maxMessageSizeRuntime, nullptr)
    , usedHmbdcCapacity_(0)
    , stopped_(false) // assumed initializer; elided in the listing
    , pool_(createPool<cpa>(maxPoolClientCount))
    , poolThreadCount_(0) {
        static_assert(!cpa::create_ipc && !cpa::attach_ipc
            , "no name specified for ipc Context");
    }
    /// ctor for constructing a local ipc Context
    Context(char const* ipcTransportName
        , uint32_t messageQueueSizePower2Num = MaxMessageSize ? 20 : 0
        , size_t maxPoolClientCount = MaxMessageSize ? 128 : 0
        , size_t maxMessageSizeRuntime = MaxMessageSize
        , uint64_t purgerCpuAffinityMask = 0xfffffffffffffffful
        , size_t maxThreadSerialNumber = 64)
    : Base(messageQueueSizePower2Num, maxMessageSizeRuntime, ipcTransportName)
    , usedHmbdcCapacity_(0)
    , stopped_(false) // assumed initializer; elided in the listing
    , pool_(createPool<cpa>(maxPoolClientCount))
    , poolThreadCount_(0)
    , secondsBetweenPurge_(60)
    , purgerCpuAffinityMask_(purgerCpuAffinityMask) {
        static_assert(cpa::create_ipc || cpa::attach_ipc
            , "ctor can only be used with ipc turned on Context");
        static_assert(!(cpa::create_ipc && cpa::attach_ipc)
            , "Context cannot be both ipc_creator and ipc_attacher");
    }

    /// dtor
    ~Context() {
        if (cpa::create_ipc) {
            Base::markDeadFrom(this->buffer_, 0);
        }
    }
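    /* ipc sketch (property names per the static_asserts above; "demo_shm" is a
       hypothetical transport name and the exact property spellings may differ):

           Context<128, context_property::ipc_creator>  owner("demo_shm"); // owning process
           Context<128, context_property::ipc_attacher> user("demo_shm");  // other processes

       The creator primes the shared transport; attachers join it by name. */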
    /// add a client to Context's pool - the Client is run in pool mode
    template <typename Client>
    void addToPool(Client& client
        , uint64_t poolThreadAffinityIn = 0xfffffffffffffffful) {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        if (std::is_base_of<single_thread_powered_client, Client>::value
            && hmbdc::numeric::setBitsCount(poolThreadAffinityIn) != 1
            && poolThreadCount_ != 1) {
            HMBDC_THROW(std::out_of_range
                , "cannot add a single thread powered client to the non-single "
                  "thread powered pool without specifying a single thread poolThreadAffinity");
        }
        // (consumer stub creation elided in the original listing)
        pool_->addConsumer(*stub, poolThreadAffinityIn);
    }
    /// add a bunch of clients to Context's pool - the Clients are run in pool mode
    template <typename Client, typename... Args>
    void addToPool(Client& client
        , uint64_t poolThreadAffinityIn, Args&&... args) {
        addToPool(client, poolThreadAffinityIn);
        addToPool(std::forward<Args>(args)...);
    }

    template <typename Client, typename Client2, typename... Args>
    typename std::enable_if<!std::is_integral<Client2>::value, void>::type
    addToPool(Client& client, Client2& client2, Args&&... args) {
        addToPool(client); // assumed first step; elided in the listing
        addToPool(client2, std::forward<Args>(args)...);
    }
    /// return the number of clients added into pool
    size_t clientCountInPool() const {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        return pool_->consumerSize();
    }

    /// how many parallel consumers are alive
    size_t parallelConsumerAlive() const {
        return this->buffer_.parallelConsumerAlive();
    }
    /// start the Context by specifying what is in it (Pool and/or direct Clients)
    /// and their paired-up cpu affinities
    template <typename... Args>
    void start(Args&&... args) {
        startWithContextProperty<cpa>(std::forward<Args>(args)...);
    }

    /// stop the message dispatching - asynchronously
    void stop() { stopWithContextProperty<cpa>(); }

    /// wait until all threads (Pool threads too, if applicable) of the Context exit
    void join() { joinWithContextProperty<cpa>(); }

    /// an ipc_creator Context runs a StuckClientPurger to purge crashed
    /// (or slow, stuck ...) Clients from the ipc transport
    void setSecondsBetweenPurge(uint32_t s) { secondsBetweenPurge_ = s; }
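    /* Lifecycle sketch (hypothetical clients; assumes a pool-enabled Context).
       start() consumes (poolThreadCount, poolAffinityMask) and/or
       (client, cpuAffinityMask) argument groups, as matched by the private
       startWithContextPropertyImpl overloads below:

           ctx.addToPool(poolClient);
           ctx.start(2, 0x3ul            // 2 pool threads allowed on cores 0-1
               , directClient, 0x4ul);   // direct Client pinned to core 2
           // ... work ...
           ctx.stop();                   // request asynchronous stop
           ctx.join();                   // wait for pool and client threads
    */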
    /// normally not used unless you want to run your own message loop
    void runPoolThreadOnce(uint16_t threadSerialNumberInPool) {
        static_assert(cpa::has_pool, "pool is not supported in the Context type");
        pool_->runOnce(threadSerialNumberInPool);
    }

    /// normally not used unless you want to run your own message loop
    template <typename Client>
    void runClientThreadOnce(uint16_t threadSerialNumber, Client& c) {
        c.messageDispatchingStarted(
            hmbdcNumbers_[threadSerialNumber]);
        context_detail::runOnceImpl(
            hmbdcNumbers_[threadSerialNumber], stopped_, this->buffer_, c);
    }
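    /* Sketch of a caller-driven message loop (keepGoing and client c are
       hypothetical):

           while (keepGoing) {
               ctx.runPoolThreadOnce(0);      // pool thread #0's share of dispatching
           }
           // or, for a direct Client:
           while (keepGoing) {
               ctx.runClientThreadOnce(0, c); // one dispatching pass for c
           }
    */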
private:
    template <typename cpa>
    typename std::enable_if<cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
    createPool(size_t maxPoolClientCount) {
        return Pool::create(this->buffer(), maxPoolClientCount);
    }

    template <typename cpa>
    typename std::enable_if<cpa::pool_msgless, typename Pool::ptr>::type
    createPool(size_t maxPoolClientCount) {
        return Pool::create(maxPoolClientCount);
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool && !cpa::pool_msgless, typename Pool::ptr>::type
    createPool(size_t) {
        return typename Pool::ptr();
    }
    template <typename cpa>
    typename std::enable_if<cpa::has_pool, void>::type
    stopWithContextProperty() {
        if (pool_) pool_->stop();
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        stopped_ = true; // assumed; elided in the original listing
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool, void>::type
    stopWithContextProperty() {
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        stopped_ = true; // assumed; elided in the original listing
    }
    template <typename cpa>
    typename std::enable_if<cpa::has_pool, void>::type
    joinWithContextProperty() {
        if (pool_) pool_->join();
        for (auto& t : threads_) {
            t.join();
        }
    }

    template <typename cpa>
    typename std::enable_if<!cpa::has_pool, void>::type
    joinWithContextProperty() {
        for (auto& t : threads_) {
            t.join();
        }
    }
    template <typename cpa>
    void reserveSlots(std::list<uint16_t>&) {
    }

    template <typename cpa, typename... Args>
    typename std::enable_if<cpa::broadcast_msg && !cpa::pool_msgless, void>::type
    reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&&... args) {
        auto available = this->buffer_.unusedConsumerIndexes();
        if (available.size() < poolThreadCount) {
            HMBDC_THROW(std::out_of_range
                , "Context remaining capacity = " << available.size()
                << ", consider increasing max_parallel_consumer");
        }
        for (uint16_t i = 0; i < poolThreadCount; ++i) {
            slots.push_back(available[i]);
            this->buffer_.reset(available[i]);
        }
        reserveSlots<cpa>(slots, std::forward<Args>(args)...);
    }
    template <typename cpa, typename... Args>
    typename std::enable_if<!cpa::broadcast_msg || cpa::pool_msgless, void>::type
    reserveSlots(std::list<uint16_t>& slots, uint16_t poolThreadCount, uint64_t, Args&&... args) {
        reserveSlots<cpa>(slots, std::forward<Args>(args)...);
    }
    template <typename cpa, typename CcClient, typename... Args>
    typename std::enable_if<cpa::broadcast_msg && !std::is_integral<CcClient>::value, void>::type
    reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&&... args) {
        const bool clientParticipateInMessaging =
            std::remove_reference<CcClient>::type::REGISTERED_MESSAGE_SIZE != 0;
        if (clientParticipateInMessaging) {
            auto available = this->buffer_.unusedConsumerIndexes();
            if (!available.size()) {
                HMBDC_THROW(std::out_of_range
                    , "Context reached capacity, consider increasing max_parallel_consumer");
            }
            this->buffer_.reset(available[0]);
            slots.push_back(available[0]);
        }
        reserveSlots<cpa>(slots, std::forward<Args>(args)...);
    }

    template <typename cpa, typename CcClient, typename... Args>
    typename std::enable_if<!cpa::broadcast_msg && !std::is_integral<CcClient>::value, void>::type
    reserveSlots(std::list<uint16_t>& slots, CcClient& c, uint64_t, Args&&... args) {
        reserveSlots<cpa>(slots, std::forward<Args>(args)...); // assumed passthrough; elided in the listing
    }
    template <typename cpa, typename... Args>
    typename std::enable_if<cpa::create_ipc || cpa::attach_ipc, void>::type
    startWithContextProperty(Args&&... args) {
        auto& lock = this->allocator_.fileLock();
        std::lock_guard<decltype(lock)> g(lock);
        std::list<uint16_t> slots;
        try {
            reserveSlots<cpa>(slots, args...);
            auto sc = slots; // working copy; line elided in the original listing
            startWithContextPropertyImpl<cpa>(sc, std::forward<Args>(args)...);
        } catch (std::out_of_range const&) {
            Base::markDead(this->buffer_, slots);
            throw; // assumed rethrow; elided in the original listing
        }
    }

    template <typename cpa, typename... Args>
    typename std::enable_if<!cpa::create_ipc && !cpa::attach_ipc, void>::type
    startWithContextProperty(Args&&... args) {
        std::list<uint16_t> slots;
        try {
            reserveSlots<cpa>(slots, args...);
            auto sc = slots; // working copy; line elided in the original listing
            startWithContextPropertyImpl<cpa>(sc, std::forward<Args>(args)...);
        } catch (std::out_of_range const&) {
            Base::markDead(this->buffer_, slots);
            throw; // assumed rethrow; elided in the original listing
        }
    }
    template <typename cpa>
    typename std::enable_if<cpa::broadcast_msg && cpa::create_ipc, void>::type
    startWithContextPropertyImpl(std::list<uint16_t>& slots) {
        // (StuckClientPurger setup elided in the original listing: purger_ is
        //  created here on first start, using secondsBetweenPurge_)
        startWithContextPropertyImpl<cpa>(slots, *purger_, purgerCpuAffinityMask_);
    }

    template <typename cpa>
    typename std::enable_if<!cpa::broadcast_msg || !cpa::create_ipc, void>::type
    startWithContextPropertyImpl(std::list<uint16_t>& slots) {
    }
    template <typename cpa, typename... Args>
    typename std::enable_if<cpa::has_pool, void>::type
    startWithContextPropertyImpl(std::list<uint16_t>& slots
        , uint16_t poolThreadCount, uint64_t poolThreadsCpuAffinityMask
        , Args&&... args) {
        if (poolThreadCount_) {
            HMBDC_THROW(std::out_of_range, "Context pool already started");
        }
        std::vector<uint16_t> sc(slots.begin(), slots.end());
        if (!poolThreadsCpuAffinityMask) {
            auto cpuCount = std::thread::hardware_concurrency();
            poolThreadsCpuAffinityMask =
                ((1ul << poolThreadCount) - 1u) << (hmbdcNumbers_.size() % cpuCount);
        }
        pool_->startAt(poolThreadCount, poolThreadsCpuAffinityMask, sc);
        poolThreadCount_ = poolThreadCount; // record before the count is consumed below
        while (poolThreadCount--) {
            if (!cpa::pool_msgless) {
                hmbdcNumbers_.push_back(*slots.begin());
                slots.pop_front(); // assumed; elided in the original listing
            }
        }
        startWithContextPropertyImpl<cpa>(slots, std::forward<Args>(args)...);
    }
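    /* Worked example of the default pool mask above: with poolThreadCount == 2,
       8 hardware cores and hmbdcNumbers_.size() == 3, the mask is
       ((1ul << 2) - 1u) << (3 % 8) == 0b11000, i.e. the pool threads may run on
       cores 3 and 4. */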
    template <typename cpa, typename Client, typename... Args>
    typename std::enable_if<!std::is_integral<Client>::value, void>::type
    startWithContextPropertyImpl(std::list<uint16_t>& slots
        , Client& c, uint64_t cpuAffinity
        , Args&&... args) {
        auto clientParticipateInMessaging =
            std::remove_reference<Client>::type::REGISTERED_MESSAGE_SIZE;
        uint16_t hmbdcNumber = 0xffffu;
        if (clientParticipateInMessaging && cpa::broadcast_msg) {
            hmbdcNumber = *slots.begin();
            slots.pop_front(); // assumed; elided in the original listing
        }
        auto thrd = kickOffClientThread(
            c, cpuAffinity, hmbdcNumber, hmbdcNumbers_.size());
        threads_.push_back(std::move(thrd));
        hmbdcNumbers_.push_back(hmbdcNumber);
        startWithContextPropertyImpl<cpa>(slots, std::forward<Args>(args)...);
    }
    template <typename Client>
    auto kickOffClientThread(
        Client& c, uint64_t mask, uint16_t hmbdcNumber, uint16_t threadSerialNumber) {
        // capture list assumed; the thread-body prologue is elided in the original listing
        std::thread thrd([this, &c, mask, h = hmbdcNumber, threadSerialNumber]() {
            auto hmbdcNumber = h;
            std::string name;
            char const* schedule;
            int priority;
            auto clientParticipateInMessaging =
                std::remove_reference<Client>::type::REGISTERED_MESSAGE_SIZE;
            if (clientParticipateInMessaging) {
                name = "hmbdc" + std::to_string(hmbdcNumber);
            } else {
                name = c.hmbdcName(); // assumed, via the Client::hmbdcName() accessor
            }
            auto cpuAffinityMask = mask;
            std::tie(schedule, priority) = c.schedSpec();
            if (!schedule) schedule = "SCHED_OTHER";
            if (!mask) { // assumed guard: default to round-robin core placement
                auto cpuCount = std::thread::hardware_concurrency();
                cpuAffinityMask = 1ul << (threadSerialNumber % cpuCount);
            }
            hmbdc::os::configureCurrentThread(name.c_str(), cpuAffinityMask
                , schedule, priority);
            hmbdcNumber = clientParticipateInMessaging ? hmbdcNumber : 0xffffu;
            // (dispatching-started callback elided in the original listing)
            while (context_detail::runOnceImpl(hmbdcNumber, this->stopped_, this->buffer_, c)) {
            }
            if (this->stopped_) c.dropped();
            if (clientParticipateInMessaging) context_detail::unblock(this->buffer_, hmbdcNumber);
        });
        return thrd;
    }
    uint16_t usedHmbdcCapacity_;
    std::vector<uint16_t> hmbdcNumbers_;
    bool stopped_; // assumed declaration (used above); elided in the listing
    typename Pool::ptr pool_;
    using Threads = std::vector<std::thread>;
    Threads threads_; // assumed declaration (used above); elided in the listing
    size_t poolThreadCount_;
    uint32_t secondsBetweenPurge_;
    uint64_t purgerCpuAffinityMask_;
    typename std::conditional<cpa::broadcast_msg && cpa::create_ipc
        , std::unique_ptr<StuckClientPurger<Buffer>>, uint32_t>::type purger_;
};

}} // namespace hmbdc::app