Lines matching defs:me — definitions and uses of the `me` parameter (the pool handle) in APR-util's thread pool implementation (apr_thread_pool.c). Numbers below are line numbers in that source file.

82 static apr_status_t thread_pool_construct(apr_thread_pool_t * me,
89 me->thd_max = max_threads;
90 me->idle_max = init_threads;
91 me->threshold = init_threads / 2;
92 rv = apr_thread_mutex_create(&me->lock, APR_THREAD_MUTEX_NESTED,
93 me->pool);
97 rv = apr_thread_cond_create(&me->cond, me->pool);
99 apr_thread_mutex_destroy(me->lock);
102 me->tasks = apr_palloc(me->pool, sizeof(*me->tasks));
103 if (!me->tasks) {
106 APR_RING_INIT(me->tasks, apr_thread_pool_task, link);
107 me->scheduled_tasks = apr_palloc(me->pool, sizeof(*me->scheduled_tasks));
108 if (!me->scheduled_tasks) {
111 APR_RING_INIT(me->scheduled_tasks, apr_thread_pool_task, link);
112 me->recycled_tasks = apr_palloc(me->pool, sizeof(*me->recycled_tasks));
113 if (!me->recycled_tasks) {
116 APR_RING_INIT(me->recycled_tasks, apr_thread_pool_task, link);
117 me->busy_thds = apr_palloc(me->pool, sizeof(*me->busy_thds));
118 if (!me->busy_thds) {
121 APR_RING_INIT(me->busy_thds, apr_thread_list_elt, link);
122 me->idle_thds = apr_palloc(me->pool, sizeof(*me->idle_thds));
123 if (!me->idle_thds) {
126 APR_RING_INIT(me->idle_thds, apr_thread_list_elt, link);
127 me->recycled_thds = apr_palloc(me->pool, sizeof(*me->recycled_thds));
128 if (!me->recycled_thds) {
131 APR_RING_INIT(me->recycled_thds, apr_thread_list_elt, link);
132 me->thd_cnt = me->idle_cnt = me->task_cnt = me->scheduled_task_cnt = 0;
133 me->tasks_run = me->tasks_high = me->thd_high = me->thd_timed_out = 0;
134 me->idle_wait = 0;
135 me->terminated = 0;
137 me->task_idx[i] = NULL;
142 apr_thread_mutex_destroy(me->lock);
143 apr_thread_cond_destroy(me->cond);
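
thread_pool_construct (line 82) builds the pool in a fixed order: mutex, condition variable, the task and thread rings, then the counters; each failure path destroys only what was already created (line 99, and lines 142-143 on later failures). A minimal sketch of that unwind pattern using the same APR primitives; the function name and shape are ours, not the pool's:

    #include <apr_thread_mutex.h>
    #include <apr_thread_cond.h>

    /* Create a mutex and a condition variable, unwinding the partially
     * built state on failure, as thread_pool_construct does. */
    static apr_status_t sync_construct(apr_thread_mutex_t **lock,
                                       apr_thread_cond_t **cond,
                                       apr_pool_t *pool)
    {
        apr_status_t rv;

        rv = apr_thread_mutex_create(lock, APR_THREAD_MUTEX_NESTED, pool);
        if (rv != APR_SUCCESS)
            return rv;                        /* nothing to unwind yet */

        rv = apr_thread_cond_create(cond, pool);
        if (rv != APR_SUCCESS)
            apr_thread_mutex_destroy(*lock);  /* undo the completed step */

        return rv;
    }
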
151 static apr_thread_pool_task_t *pop_task(apr_thread_pool_t * me)
157 if (me->scheduled_task_cnt > 0) {
158 task = APR_RING_FIRST(me->scheduled_tasks);
161 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
165 --me->scheduled_task_cnt;
171 if (me->task_cnt == 0) {
175 task = APR_RING_FIRST(me->tasks);
177 assert(task != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link));
178 --me->task_cnt;
180 if (task == me->task_idx[seg]) {
181 me->task_idx[seg] = APR_RING_NEXT(task, link);
182 if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
184 || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
185 me->task_idx[seg] = NULL;
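
pop_task (line 151) serves a due scheduled task first (lines 157-165) and otherwise removes the head of the priority-ordered task ring, repairing the per-priority-segment index when that head was also task_idx[seg] (lines 180-185). A minimal sketch of the fix-up under simplified types; task_t, SEG_OF and the priority field are illustrative stand-ins for apr_thread_pool_task and the TASK_PRIORITY_SEG macro:

    #include <stddef.h>
    #include <apr_ring.h>

    #define SEGS 4
    #define SEG_OF(t) ((t)->priority / 64)   /* stand-in for TASK_PRIORITY_SEG */

    typedef struct task task_t;
    struct task {
        APR_RING_ENTRY(task) link;
        int priority;                        /* 0..255, higher runs first */
    };
    APR_RING_HEAD(task_ring, task);

    /* Pop the ring head (the highest-priority task) and keep the
     * per-segment index consistent, as pop_task does at lines 175-185. */
    static task_t *pop_head(struct task_ring *tasks, task_t *idx[SEGS])
    {
        task_t *t = APR_RING_FIRST(tasks);
        int seg;

        if (t == APR_RING_SENTINEL(tasks, task, link))
            return NULL;                     /* ring is empty */

        seg = SEG_OF(t);
        if (t == idx[seg]) {
            /* The index pointed at the departing head: advance it, or
             * clear it when the next element is the sentinel or belongs
             * to a different segment. */
            idx[seg] = APR_RING_NEXT(t, link);
            if (idx[seg] == APR_RING_SENTINEL(tasks, task, link)
                || SEG_OF(idx[seg]) != seg) {
                idx[seg] = NULL;
            }
        }
        APR_RING_REMOVE(t, link);
        return t;
    }
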
192 static apr_interval_time_t waiting_time(apr_thread_pool_t * me)
196 task = APR_RING_FIRST(me->scheduled_tasks);
199 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
207 static struct apr_thread_list_elt *elt_new(apr_thread_pool_t * me,
212 if (APR_RING_EMPTY(me->recycled_thds, apr_thread_list_elt, link)) {
213 elt = apr_pcalloc(me->pool, sizeof(*elt));
219 elt = APR_RING_FIRST(me->recycled_thds);
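
elt_new (line 207), like task_new (line 402), reuses an element from a recycle ring when one exists and otherwise apr_pcalloc()s a fresh one, since pool allocations cannot be freed individually. The pattern in isolation, with an illustrative struct:

    #include <apr_ring.h>
    #include <apr_pools.h>

    typedef struct elt elt_t;
    struct elt {
        APR_RING_ENTRY(elt) link;
        int state;
    };
    APR_RING_HEAD(elt_ring, elt);

    /* Reuse a recycled element when one exists, otherwise allocate a new
     * one from the pool; pool memory is never freed piecemeal, which is
     * why the pool keeps recycle rings at all. */
    static elt_t *elt_get(struct elt_ring *recycled, apr_pool_t *pool)
    {
        elt_t *e;

        if (APR_RING_EMPTY(recycled, elt, link)) {
            e = apr_pcalloc(pool, sizeof(*e));
        }
        else {
            e = APR_RING_FIRST(recycled);
            APR_RING_REMOVE(e, link);
        }
        return e;
    }
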
240 apr_thread_pool_t *me = param;
245 apr_thread_mutex_lock(me->lock);
246 elt = elt_new(me, t);
248 apr_thread_mutex_unlock(me->lock);
252 while (!me->terminated && elt->state != TH_STOP) {
255 --me->idle_cnt;
259 APR_RING_INSERT_TAIL(me->busy_thds, elt, apr_thread_list_elt, link);
260 task = pop_task(me);
261 while (NULL != task && !me->terminated) {
262 ++me->tasks_run;
264 apr_thread_mutex_unlock(me->lock);
267 apr_thread_mutex_lock(me->lock);
268 APR_RING_INSERT_TAIL(me->recycled_tasks, task,
274 task = pop_task(me);
281 if ((me->idle_cnt >= me->idle_max
282 && !(me->scheduled_task_cnt && 0 >= me->idle_max)
283 && !me->idle_wait)
284 || me->terminated || elt->state != TH_RUN) {
285 --me->thd_cnt;
286 if ((TH_PROBATION == elt->state) && me->idle_wait)
287 ++me->thd_timed_out;
288 APR_RING_INSERT_TAIL(me->recycled_thds, elt,
290 apr_thread_mutex_unlock(me->lock);
297 ++me->idle_cnt;
298 APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);
305 if (me->scheduled_task_cnt)
306 wait = waiting_time(me);
307 else if (me->idle_cnt > me->idle_max) {
308 wait = me->idle_wait;
315 apr_thread_cond_timedwait(me->cond, me->lock, wait);
318 apr_thread_cond_wait(me->cond, me->lock);
323 --me->thd_cnt;
324 apr_thread_mutex_unlock(me->lock);
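
thread_pool_func (from line 240) is the worker body: with the lock held it moves itself onto busy_thds and drains pop_task, dropping the lock around the user callback (lines 264-267); when idle it either exits (idle quota exceeded or pool terminated, lines 281-290) or parks on idle_thds and waits on the condition variable, with a timeout when scheduled work or idle_wait bounds the sleep (lines 305-318). A condensed sketch of that lock discipline; pool_state, pending and do_work are illustrative stand-ins for the real queue plumbing:

    #include <apr_thread_proc.h>
    #include <apr_thread_mutex.h>
    #include <apr_thread_cond.h>

    /* Illustrative worker skeleton: hold the lock while touching shared
     * state, drop it while running the user's callback, block on the
     * condition variable when there is nothing to do. */
    struct pool_state {
        apr_thread_mutex_t *lock;
        apr_thread_cond_t *cond;
        int pending;        /* queued (dummy) tasks */
        int terminated;
    };

    static void do_work(void)
    {
        /* stands in for the user's task function */
    }

    static void *APR_THREAD_FUNC worker(apr_thread_t *thd, void *param)
    {
        struct pool_state *ps = param;

        apr_thread_mutex_lock(ps->lock);
        while (!ps->terminated) {
            if (ps->pending > 0) {
                --ps->pending;
                apr_thread_mutex_unlock(ps->lock);  /* never run user code */
                do_work();                          /* while holding the lock */
                apr_thread_mutex_lock(ps->lock);
            }
            else {
                /* The real loop uses apr_thread_cond_timedwait() when a
                 * scheduled task or idle_wait bounds the sleep (line 315). */
                apr_thread_cond_wait(ps->cond, ps->lock);
            }
        }
        apr_thread_mutex_unlock(ps->lock);
        apr_thread_exit(thd, APR_SUCCESS);
        return NULL;
    }
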
329 static apr_status_t thread_pool_cleanup(void *me)
331 apr_thread_pool_t *_myself = me;
343 APU_DECLARE(apr_status_t) apr_thread_pool_create(apr_thread_pool_t ** me,
352 *me = NULL;
370 * allocate from (*me)->pool. This is dangerous if there are multiple
387 *me = tp;
393 APU_DECLARE(apr_status_t) apr_thread_pool_destroy(apr_thread_pool_t * me)
395 apr_pool_destroy(me->pool);
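
apr_thread_pool_create (line 343) registers thread_pool_cleanup on its own subpool, so apr_thread_pool_destroy (line 393) reduces to apr_pool_destroy; the comment at line 370 warns against allocating from (*me)->pool once worker threads exist. A minimal, complete usage example of this public API; the task body and names are ours:

    #include <stdio.h>
    #include <apr_general.h>
    #include <apr_time.h>
    #include <apr_thread_pool.h>

    /* A trivial task; the required signature is apr_thread_start_t. */
    static void *APR_THREAD_FUNC hello_task(apr_thread_t *thd, void *param)
    {
        printf("task ran with param %s\n", (const char *)param);
        return NULL;
    }

    int main(void)
    {
        apr_pool_t *pool;
        apr_thread_pool_t *tp;
        apr_status_t rv;

        apr_initialize();
        apr_pool_create(&pool, NULL);

        /* 2 initial threads, at most 8. */
        rv = apr_thread_pool_create(&tp, 2, 8, pool);
        if (rv == APR_SUCCESS) {
            apr_thread_pool_push(tp, hello_task, "hi",
                                 APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
            apr_sleep(apr_time_from_sec(1));   /* crude: let the task finish */
            apr_thread_pool_destroy(tp);       /* tears down the subpool */
        }

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }
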
402 static apr_thread_pool_task_t *task_new(apr_thread_pool_t * me,
409 if (APR_RING_EMPTY(me->recycled_tasks, apr_thread_pool_task, link)) {
410 t = apr_pcalloc(me->pool, sizeof(*t));
416 t = APR_RING_FIRST(me->recycled_tasks);
440 static apr_thread_pool_task_t *add_if_empty(apr_thread_pool_t * me,
448 if (me->task_idx[seg]) {
449 assert(APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
450 me->task_idx[seg]);
451 t_next = me->task_idx[seg];
454 if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
463 if (me->task_idx[next]) {
464 APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
469 APR_RING_INSERT_TAIL(me->tasks, t, apr_thread_pool_task, link);
471 me->task_idx[seg] = t;
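
add_if_empty (line 440) handles the case where the task's priority segment has no index yet: it anchors the task before the first task of the next lower populated segment (lines 463-464), falls back to the ring tail (line 469), and records the task as the segment index (line 471); for a populated segment the code scans within it instead (line 451 ff. and add_task's loop at line 551). A sketch of both paths, reusing the illustrative task_t / task_ring / SEG_OF definitions from the pop_head sketch above; the within-segment scan here mirrors FIFO ("push") placement and is a paraphrase, not the pool's exact code:

    /* Insert t so the ring stays ordered by descending priority, keeping
     * the per-segment index pointing at each segment's first task. */
    static void insert_by_segment(struct task_ring *tasks, task_t *idx[SEGS],
                                  task_t *t)
    {
        int seg = SEG_OF(t);

        if (idx[seg] == NULL) {
            /* Empty segment: anchor before the next lower populated
             * segment's index, or at the tail if there is none. */
            int next = seg - 1;
            while (next >= 0 && idx[next] == NULL)
                --next;
            if (next < 0)
                APR_RING_INSERT_TAIL(tasks, t, task, link);
            else
                APR_RING_INSERT_BEFORE(idx[next], t, link);
            idx[seg] = t;
        }
        else {
            /* Populated segment: skip past tasks of equal or higher
             * priority (FIFO within a priority level). */
            task_t *pos = idx[seg];
            while (pos != APR_RING_SENTINEL(tasks, task, link)
                   && pos->priority >= t->priority)
                pos = APR_RING_NEXT(pos, link);
            APR_RING_INSERT_BEFORE(pos, t, link);
            if (pos == idx[seg])
                idx[seg] = t;                /* t outranks the old head */
        }
    }
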
479 static apr_status_t schedule_task(apr_thread_pool_t *me,
487 apr_thread_mutex_lock(me->lock);
489 t = task_new(me, func, param, 0, owner, time);
491 apr_thread_mutex_unlock(me->lock);
494 t_loc = APR_RING_FIRST(me->scheduled_tasks);
498 ++me->scheduled_task_cnt;
505 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
507 ++me->scheduled_task_cnt;
508 APR_RING_INSERT_TAIL(me->scheduled_tasks, t,
515 if (0 == me->thd_cnt) {
516 rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
518 ++me->thd_cnt;
519 if (me->thd_cnt > me->thd_high)
520 me->thd_high = me->thd_cnt;
523 apr_thread_cond_signal(me->cond);
524 apr_thread_mutex_unlock(me->lock);
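
schedule_task (line 479) keeps scheduled_tasks ordered by dispatch time, scanning from the front for the first later task (line 494 ff.) and appending at the tail otherwise (lines 505-508), then lazily spawns the first worker if none exists (lines 515-520). The ordering step in isolation, with illustrative types (the real element is apr_thread_pool_task carrying its dispatch time):

    #include <apr_ring.h>
    #include <apr_time.h>

    typedef struct timed_task timed_task_t;
    struct timed_task {
        APR_RING_ENTRY(timed_task) link;
        apr_time_t when;    /* absolute dispatch time */
    };
    APR_RING_HEAD(timed_ring, timed_task);

    /* Keep the scheduled ring sorted by dispatch time: insert before the
     * first later task, else append at the tail (cf. lines 494-508). */
    static void insert_scheduled(struct timed_ring *ring, timed_task_t *t)
    {
        timed_task_t *pos = APR_RING_FIRST(ring);

        while (pos != APR_RING_SENTINEL(ring, timed_task, link)) {
            if (pos->when > t->when) {
                APR_RING_INSERT_BEFORE(pos, t, link);
                return;
            }
            pos = APR_RING_NEXT(pos, link);
        }
        APR_RING_INSERT_TAIL(ring, t, timed_task, link);
    }
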
528 static apr_status_t add_task(apr_thread_pool_t *me, apr_thread_start_t func,
537 apr_thread_mutex_lock(me->lock);
539 t = task_new(me, func, param, priority, owner, 0);
541 apr_thread_mutex_unlock(me->lock);
545 t_loc = add_if_empty(me, t);
551 while (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
558 if (t_loc == me->task_idx[TASK_PRIORITY_SEG(t)]) {
559 me->task_idx[TASK_PRIORITY_SEG(t)] = t;
564 me->task_cnt++;
565 if (me->task_cnt > me->tasks_high)
566 me->tasks_high = me->task_cnt;
567 if (0 == me->thd_cnt || (0 == me->idle_cnt && me->thd_cnt < me->thd_max &&
568 me->task_cnt > me->threshold)) {
569 rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
571 ++me->thd_cnt;
572 if (me->thd_cnt > me->thd_high)
573 me->thd_high = me->thd_cnt;
577 apr_thread_cond_signal(me->cond);
578 apr_thread_mutex_unlock(me->lock);
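
add_task (line 528) grows the pool only when it helps: a thread is created if none exist at all, or if nobody is idle, the thd_max cap is not reached, and the backlog exceeds threshold (lines 567-568; threshold defaults to init_threads / 2, line 91). The same predicate written out, as a paraphrase for illustration rather than the pool's own code:

    #include <apr.h>    /* apr_size_t */

    /* The spawn decision from add_task lines 567-568. */
    static int should_spawn(apr_size_t thd_cnt, apr_size_t idle_cnt,
                            apr_size_t thd_max, apr_size_t task_cnt,
                            apr_size_t threshold)
    {
        return thd_cnt == 0
            || (idle_cnt == 0 && thd_cnt < thd_max && task_cnt > threshold);
    }
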
583 APU_DECLARE(apr_status_t) apr_thread_pool_push(apr_thread_pool_t *me,
589 return add_task(me, func, param, priority, 1, owner);
592 APU_DECLARE(apr_status_t) apr_thread_pool_schedule(apr_thread_pool_t *me,
598 return schedule_task(me, func, param, owner, time);
601 APU_DECLARE(apr_status_t) apr_thread_pool_top(apr_thread_pool_t *me,
607 return add_task(me, func, param, priority, 0, owner);
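
The three public submission calls differ only in placement: apr_thread_pool_push appends behind equal-priority work (push flag 1, line 589), apr_thread_pool_top goes ahead of its priority class (flag 0, line 607), and apr_thread_pool_schedule queues by a relative delay that is converted to an absolute dispatch time internally. A usage sketch; tp, task_fn and the data pointers are caller-supplied, as in the create example above:

    #include <apr_time.h>
    #include <apr_thread_pool.h>

    /* Illustrative submissions; tp comes from apr_thread_pool_create()
     * and task_fn is any apr_thread_start_t. */
    static void submit_examples(apr_thread_pool_t *tp,
                                apr_thread_start_t task_fn,
                                void *data, void *urgent, void *owner)
    {
        /* FIFO within its priority class. */
        apr_thread_pool_push(tp, task_fn, data,
                             APR_THREAD_TASK_PRIORITY_NORMAL, owner);

        /* Ahead of everything already queued at this priority. */
        apr_thread_pool_top(tp, task_fn, urgent,
                            APR_THREAD_TASK_PRIORITY_NORMAL, owner);

        /* Roughly five seconds from now. */
        apr_thread_pool_schedule(tp, task_fn, data,
                                 apr_time_from_sec(5), owner);
    }
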
610 static apr_status_t remove_scheduled_tasks(apr_thread_pool_t *me,
616 t_loc = APR_RING_FIRST(me->scheduled_tasks);
618 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
623 --me->scheduled_task_cnt;
631 static apr_status_t remove_tasks(apr_thread_pool_t *me, void *owner)
637 t_loc = APR_RING_FIRST(me->tasks);
638 while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
641 --me->task_cnt;
643 if (t_loc == me->task_idx[seg]) {
644 me->task_idx[seg] = APR_RING_NEXT(t_loc, link);
645 if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
648 || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
649 me->task_idx[seg] = NULL;
659 static void wait_on_busy_threads(apr_thread_pool_t *me, void *owner)
665 apr_thread_mutex_lock(me->lock);
666 elt = APR_RING_FIRST(me->busy_thds);
667 while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
683 apr_thread_mutex_unlock(me->lock);
685 apr_thread_mutex_lock(me->lock);
687 elt = APR_RING_FIRST(me->busy_thds);
689 apr_thread_mutex_unlock(me->lock);
693 APU_DECLARE(apr_status_t) apr_thread_pool_tasks_cancel(apr_thread_pool_t *me,
698 apr_thread_mutex_lock(me->lock);
699 if (me->task_cnt > 0) {
700 rv = remove_tasks(me, owner);
702 if (me->scheduled_task_cnt > 0) {
703 rv = remove_scheduled_tasks(me, owner);
705 apr_thread_mutex_unlock(me->lock);
706 wait_on_busy_threads(me, owner);
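
Cancellation is keyed by the owner pointer given at submission: apr_thread_pool_tasks_cancel (line 693) removes matching queued and scheduled tasks under the lock (lines 698-705), then calls wait_on_busy_threads, which repeatedly drops and retakes the lock (lines 683-685) until no busy thread is still running a matching task. A usage sketch with a hypothetical no-op task:

    #include <apr_thread_pool.h>

    /* A no-op task used only to illustrate owner-based cancellation. */
    static void *APR_THREAD_FUNC noop_task(apr_thread_t *thd, void *param)
    {
        return NULL;
    }

    static void submit_then_cancel(apr_thread_pool_t *tp)
    {
        static int owner_tag;   /* any stable address serves as the owner key */

        apr_thread_pool_push(tp, noop_task, NULL,
                             APR_THREAD_TASK_PRIORITY_LOW, &owner_tag);
        /* ... later: withdraw everything tagged with &owner_tag and wait
         * for any busy thread still running one of those tasks. */
        apr_thread_pool_tasks_cancel(tp, &owner_tag);
    }
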
711 APU_DECLARE(apr_size_t) apr_thread_pool_tasks_count(apr_thread_pool_t *me)
713 return me->task_cnt;
717 apr_thread_pool_scheduled_tasks_count(apr_thread_pool_t *me)
719 return me->scheduled_task_cnt;
722 APU_DECLARE(apr_size_t) apr_thread_pool_threads_count(apr_thread_pool_t *me)
724 return me->thd_cnt;
727 APU_DECLARE(apr_size_t) apr_thread_pool_busy_count(apr_thread_pool_t *me)
729 return me->thd_cnt - me->idle_cnt;
732 APU_DECLARE(apr_size_t) apr_thread_pool_idle_count(apr_thread_pool_t *me)
734 return me->idle_cnt;
738 apr_thread_pool_tasks_run_count(apr_thread_pool_t * me)
740 return me->tasks_run;
744 apr_thread_pool_tasks_high_count(apr_thread_pool_t * me)
746 return me->tasks_high;
750 apr_thread_pool_threads_high_count(apr_thread_pool_t * me)
752 return me->thd_high;
756 apr_thread_pool_threads_idle_timeout_count(apr_thread_pool_t * me)
758 return me->thd_timed_out;
762 APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_get(apr_thread_pool_t *me)
764 return me->idle_max;
768 apr_thread_pool_idle_wait_get(apr_thread_pool_t * me)
770 return me->idle_wait;
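
The accessors from line 711 onward return single fields without taking the lock, so they are cheap but only point-in-time approximations; busy_count is derived as thd_cnt - idle_cnt (line 729). A monitoring sketch using only these public getters:

    #include <stdio.h>
    #include <apr_thread_pool.h>

    /* Print a snapshot of pool activity; the accessors read unlocked,
     * so treat the values as approximate. */
    static void dump_stats(apr_thread_pool_t *tp)
    {
        printf("threads=%lu (busy=%lu idle=%lu, high=%lu)\n",
               (unsigned long)apr_thread_pool_threads_count(tp),
               (unsigned long)apr_thread_pool_busy_count(tp),
               (unsigned long)apr_thread_pool_idle_count(tp),
               (unsigned long)apr_thread_pool_threads_high_count(tp));
        printf("tasks queued=%lu scheduled=%lu run=%lu (high=%lu)\n",
               (unsigned long)apr_thread_pool_tasks_count(tp),
               (unsigned long)apr_thread_pool_scheduled_tasks_count(tp),
               (unsigned long)apr_thread_pool_tasks_run_count(tp),
               (unsigned long)apr_thread_pool_tasks_high_count(tp));
    }
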
778 static struct apr_thread_list_elt *trim_threads(apr_thread_pool_t *me,
785 apr_thread_mutex_lock(me->lock);
787 thds = me->idle_thds;
788 n = me->idle_cnt;
791 thds = me->busy_thds;
792 n = me->thd_cnt - me->idle_cnt;
795 apr_thread_mutex_unlock(me->lock);
808 me->idle_cnt = *cnt;
821 apr_thread_mutex_unlock(me->lock);
828 static apr_size_t trim_idle_threads(apr_thread_pool_t *me, apr_size_t cnt)
834 elt = trim_threads(me, &cnt, 1);
836 apr_thread_mutex_lock(me->lock);
837 apr_thread_cond_broadcast(me->cond);
838 apr_thread_mutex_unlock(me->lock);
848 apr_thread_mutex_lock(me->lock);
849 APR_RING_SPLICE_TAIL(me->recycled_thds, head, tail,
851 apr_thread_mutex_unlock(me->lock);
861 static apr_size_t trim_busy_threads(apr_thread_pool_t *me, apr_size_t cnt)
863 trim_threads(me, &cnt, 0);
867 APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_set(apr_thread_pool_t *me,
870 me->idle_max = cnt;
871 cnt = trim_idle_threads(me, cnt);
876 apr_thread_pool_idle_wait_set(apr_thread_pool_t * me,
881 oldtime = me->idle_wait;
882 me->idle_wait = timeout;
887 APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_get(apr_thread_pool_t *me)
889 return me->thd_max;
896 APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_set(apr_thread_pool_t *me,
901 me->thd_max = cnt;
902 if (0 == cnt || me->thd_cnt <= cnt) {
906 n = me->thd_cnt - cnt;
907 if (n >= me->idle_cnt) {
908 trim_busy_threads(me, n - me->idle_cnt);
909 trim_idle_threads(me, 0);
912 trim_idle_threads(me, me->idle_cnt - n);
917 APU_DECLARE(apr_size_t) apr_thread_pool_threshold_get(apr_thread_pool_t *me)
919 return me->threshold;
922 APU_DECLARE(apr_size_t) apr_thread_pool_threshold_set(apr_thread_pool_t *me,
927 ov = me->threshold;
928 me->threshold = val;
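
The setters apply their trims immediately: idle_max_set stores the new cap and broadcasts so surplus idlers wake and exit (lines 870-871, with the broadcast at line 837); thread_max_set stops busy and/or idle threads when the live count exceeds the new cap (lines 901-912) and appears to return how many it stopped; threshold_set and idle_wait_set swap the value and return the old one (lines 881-882, 927-928). A tuning sketch against the public API:

    #include <apr_time.h>
    #include <apr_thread_pool.h>

    /* Retune a live pool: let idlers exit after 5s, cap idle threads,
     * lower the hard thread cap, and raise the spawn threshold. */
    static void retune(apr_thread_pool_t *tp)
    {
        apr_size_t stopped;

        apr_thread_pool_idle_wait_set(tp, apr_time_from_sec(5));
        stopped  = apr_thread_pool_idle_max_set(tp, 2);    /* idlers stopped */
        stopped += apr_thread_pool_thread_max_set(tp, 4);  /* threads over cap */
        apr_thread_pool_threshold_set(tp, 8);              /* returns old value */
        (void)stopped;
    }
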