1 /* Thread management routine
2 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
4 * This file is part of GNU Zebra.
6 * GNU Zebra is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with GNU Zebra; see the file COPYING. If not, write to the Free
18 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
25 #include <sys/resource.h>
35 #if defined(__APPLE__)
36 #include <mach/mach.h>
37 #include <mach/mach_time.h>
40 /* Recent absolute time of day */
41 struct timeval recent_time;
42 static struct timeval last_recent_time;
43 /* Relative time, since startup */
44 static struct timeval relative_time;
45 static struct timeval relative_time_base;
47 static unsigned short timers_inited;
49 static struct hash *cpu_record = NULL;
51 /* Struct timeval's tv_usec one second value. */
52 #define TIMER_SECOND_MICRO 1000000L
54 /* Adjust so that tv_usec is in the range [0,TIMER_SECOND_MICRO).
55 And change negative values to 0. */
57 timeval_adjust (struct timeval a)
59 while (a.tv_usec >= TIMER_SECOND_MICRO)
61 a.tv_usec -= TIMER_SECOND_MICRO;
67 a.tv_usec += TIMER_SECOND_MICRO;
72 /* Change negative timeouts to 0. */
73 a.tv_sec = a.tv_usec = 0;
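/* Illustrative example (annotation, not in the original source): with
 * TIMER_SECOND_MICRO == 1000000L, normalisation behaves like this:
 *
 *   { .tv_sec = 1, .tv_usec = 1500000 }  ->  { 2, 500000 }
 *   { .tv_sec = 2, .tv_usec = -300000 }  ->  { 1, 700000 }
 *   { .tv_sec = -1, .tv_usec = 0 }       ->  { 0, 0 }   (negative clamped)
 */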
79 timeval_subtract (struct timeval a, struct timeval b)
83 ret.tv_usec = a.tv_usec - b.tv_usec;
84 ret.tv_sec = a.tv_sec - b.tv_sec;
86 return timeval_adjust (ret);
90 timeval_cmp (struct timeval a, struct timeval b)
92 return (a.tv_sec == b.tv_sec
93 ? a.tv_usec - b.tv_usec : a.tv_sec - b.tv_sec);
97 timeval_elapsed (struct timeval a, struct timeval b)
99 return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
100 + (a.tv_usec - b.tv_usec));
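/* Illustrative example (annotation): timeval_elapsed() returns the difference
 * in microseconds, e.g. a = {5, 200000}, b = {3, 900000} gives
 * (5 - 3) * 1000000 + (200000 - 900000) = 1300000 usec, i.e. 1.3 seconds. */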
103 #if !defined(HAVE_CLOCK_MONOTONIC) && !defined(__APPLE__)
105 quagga_gettimeofday_relative_adjust (void)
108 if (timeval_cmp (recent_time, last_recent_time) < 0)
110 relative_time.tv_sec++;
111 relative_time.tv_usec = 0;
115 diff = timeval_subtract (recent_time, last_recent_time);
116 relative_time.tv_sec += diff.tv_sec;
117 relative_time.tv_usec += diff.tv_usec;
118 relative_time = timeval_adjust (relative_time);
120 last_recent_time = recent_time;
122 #endif /* !HAVE_CLOCK_MONOTONIC && !__APPLE__ */
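/* Annotation: the point of the adjustment above is to keep relative_time
 * monotonic on systems without CLOCK_MONOTONIC. If gettimeofday() steps
 * backwards (e.g. the operator sets the clock back), recent_time compares
 * less than last_recent_time and relative_time is simply advanced to the
 * next whole second instead of moving backwards with the wall clock. */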
124 /* gettimeofday wrapper, to keep recent_time updated */
126 quagga_gettimeofday (struct timeval *tv)
132 if (!(ret = gettimeofday (&recent_time, NULL)))
137 relative_time_base = last_recent_time = recent_time;
141   /* avoid copy if user passed recent_time pointer. */

141 if (tv != &recent_time)
149 quagga_get_relative (struct timeval *tv)
153 #ifdef HAVE_CLOCK_MONOTONIC
156 if (!(ret = clock_gettime (CLOCK_MONOTONIC, &tp)))
158 relative_time.tv_sec = tp.tv_sec;
159 relative_time.tv_usec = tp.tv_nsec / 1000;
162 #elif defined(__APPLE__)
166 static mach_timebase_info_data_t timebase_info;
168 ticks = mach_absolute_time();
169 if (timebase_info.denom == 0)
170 mach_timebase_info(&timebase_info);
172 useconds = ticks * timebase_info.numer / timebase_info.denom / 1000;
173 relative_time.tv_sec = useconds / 1000000;
174 relative_time.tv_usec = useconds % 1000000;
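/* Illustrative example (annotation): mach_absolute_time() returns ticks, and
 * timebase_info gives the ticks-to-nanoseconds ratio. For instance, with
 * numer = 125 and denom = 3, 24000 ticks are 24000 * 125 / 3 = 1000000 ns,
 * so useconds = 1000 and relative_time = { 0, 1000 }. */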
178 #else /* !HAVE_CLOCK_MONOTONIC && !__APPLE__ */
179 if (!(ret = quagga_gettimeofday (&recent_time)))
180 quagga_gettimeofday_relative_adjust();
181 #endif /* HAVE_CLOCK_MONOTONIC */
189 /* Get absolute time stamp, but in terms of the internal timer
190 * Could be wrong, but at least won't go back.
193 quagga_real_stabilised (struct timeval *tv)
195 *tv = relative_time_base;
196 tv->tv_sec += relative_time.tv_sec;
197 tv->tv_usec += relative_time.tv_usec;
198 *tv = timeval_adjust (*tv);
201 /* Exported Quagga timestamp function.
202 * Modelled on POSIX clock_gettime.
205 quagga_gettime (enum quagga_clkid clkid, struct timeval *tv)
209 case QUAGGA_CLK_REALTIME:
210 return quagga_gettimeofday (tv);
211 case QUAGGA_CLK_MONOTONIC:
212 return quagga_get_relative (tv);
213 case QUAGGA_CLK_REALTIME_STABILISED:
214 quagga_real_stabilised (tv);
222 /* time_t value in terms of stabilised absolute time.
223 * replacement for POSIX time()
226 quagga_time (time_t *t)
229 quagga_real_stabilised (&tv);
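/* Usage sketch (annotation, not in the original): callers pick a clock id
 * depending on what they need, e.g.:
 *
 *   struct timeval now;
 *   quagga_gettime (QUAGGA_CLK_MONOTONIC, &now);   // time since startup
 *   quagga_gettime (QUAGGA_CLK_REALTIME, &now);    // wall-clock time
 *
 *   time_t t;
 *   quagga_time (&t);    // stabilised wall-clock seconds, like time()
 */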
235 /* Public export of recent_relative_time by value */
237 recent_relative_time (void)
239 return relative_time;
243 cpu_record_hash_key (struct cpu_thread_history *a)
245 return (uintptr_t) a->func;
249 cpu_record_hash_cmp (const struct cpu_thread_history *a,
250 const struct cpu_thread_history *b)
252 return a->func == b->func;
256 cpu_record_hash_alloc (struct cpu_thread_history *a)
258 struct cpu_thread_history *new;
259 new = XCALLOC (MTYPE_THREAD_STATS, sizeof (struct cpu_thread_history));
261 new->funcname = a->funcname;
266 cpu_record_hash_free (void *a)
268 struct cpu_thread_history *hist = a;
270 XFREE (MTYPE_THREAD_STATS, hist);
274 vty_out_cpu_thread_history(struct vty* vty,
275 struct cpu_thread_history *a)
278 vty_out(vty, "%7ld.%03ld %9d %8ld %9ld %8ld %9ld",
279 a->cpu.total/1000, a->cpu.total%1000, a->total_calls,
280 a->cpu.total/a->total_calls, a->cpu.max,
281 a->real.total/a->total_calls, a->real.max);
283 vty_out(vty, "%7ld.%03ld %9d %8ld %9ld",
284 a->real.total/1000, a->real.total%1000, a->total_calls,
285 a->real.total/a->total_calls, a->real.max);
287 vty_out(vty, " %c%c%c%c%c%c %s%s",
288 a->types & (1 << THREAD_READ) ? 'R':' ',
289 a->types & (1 << THREAD_WRITE) ? 'W':' ',
290 a->types & (1 << THREAD_TIMER) ? 'T':' ',
291 a->types & (1 << THREAD_EVENT) ? 'E':' ',
292 a->types & (1 << THREAD_EXECUTE) ? 'X':' ',
293 a->types & (1 << THREAD_BACKGROUND) ? 'B' : ' ',
294 a->funcname, VTY_NEWLINE);
298 cpu_record_hash_print(struct hash_backet *bucket,
301 struct cpu_thread_history *totals = args[0];
302 struct vty *vty = args[1];
303 thread_type *filter = args[2];
304 struct cpu_thread_history *a = bucket->data;
307 if ( !(a->types & *filter) )
309 vty_out_cpu_thread_history(vty,a);
310 totals->total_calls += a->total_calls;
311 totals->real.total += a->real.total;
312 if (totals->real.max < a->real.max)
313 totals->real.max = a->real.max;
315 totals->cpu.total += a->cpu.total;
316 if (totals->cpu.max < a->cpu.max)
317 totals->cpu.max = a->cpu.max;
322 cpu_record_print(struct vty *vty, thread_type filter)
324 struct cpu_thread_history tmp;
325 void *args[3] = {&tmp, vty, &filter};
327 memset(&tmp, 0, sizeof tmp);
328 tmp.funcname = "TOTAL";
332 vty_out(vty, "%21s %18s %18s%s",
333 "", "CPU (user+system):", "Real (wall-clock):", VTY_NEWLINE);
335 vty_out(vty, "Runtime(ms) Invoked Avg uSec Max uSecs");
337 vty_out(vty, " Avg uSec Max uSecs");
339 vty_out(vty, " Type Thread%s", VTY_NEWLINE);
340 hash_iterate(cpu_record,
341 (void(*)(struct hash_backet*,void*))cpu_record_hash_print,
344 if (tmp.total_calls > 0)
345 vty_out_cpu_thread_history(vty, &tmp);
348 DEFUN(show_thread_cpu,
350 "show thread cpu [FILTER]",
352 "Thread information\n"
354 "Display filter (rwtexb)\n")
357 thread_type filter = (thread_type) -1U;
362 while (argv[0][i] != '\0')
364 switch ( argv[0][i] )
368 filter |= (1 << THREAD_READ);
372 filter |= (1 << THREAD_WRITE);
376 filter |= (1 << THREAD_TIMER);
380 filter |= (1 << THREAD_EVENT);
384 filter |= (1 << THREAD_EXECUTE);
388 filter |= (1 << THREAD_BACKGROUND);
397 vty_out(vty, "Invalid filter \"%s\" specified,"
398 " must contain at least one of 'RWTEXB'%s",
399 argv[0], VTY_NEWLINE);
404 cpu_record_print(vty, filter);
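/* Usage sketch (annotation): the optional FILTER argument to show_thread_cpu
 * is a string of the letters r/w/t/e/x/b, each enabling one thread type in
 * the output. For example "show thread cpu rt" lists only read and timer
 * threads, while plain "show thread cpu" shows everything (filter defaults
 * to -1U, i.e. all types). */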
409 cpu_record_hash_clear (struct hash_backet *bucket,
412 thread_type *filter = args;
413 struct cpu_thread_history *a = bucket->data;
416 if ( !(a->types & *filter) )
419 hash_release (cpu_record, bucket->data);
423 cpu_record_clear (thread_type filter)
425 thread_type *tmp = &filter;
426 hash_iterate (cpu_record,
427 (void (*) (struct hash_backet*,void*)) cpu_record_hash_clear,
431 DEFUN(clear_thread_cpu,
432 clear_thread_cpu_cmd,
433 "clear thread cpu [FILTER]",
434 "Clear stored data\n"
435 "Thread information\n"
437 "Display filter (rwtexb)\n")
440 thread_type filter = (thread_type) -1U;
445 while (argv[0][i] != '\0')
447 switch ( argv[0][i] )
451 filter |= (1 << THREAD_READ);
455 filter |= (1 << THREAD_WRITE);
459 filter |= (1 << THREAD_TIMER);
463 filter |= (1 << THREAD_EVENT);
467 filter |= (1 << THREAD_EXECUTE);
471 filter |= (1 << THREAD_BACKGROUND);
480 vty_out(vty, "Invalid filter \"%s\" specified,"
481 " must contain at least one of 'RWTEXB'%s",
482 argv[0], VTY_NEWLINE);
487 cpu_record_clear (filter);
492 thread_timer_cmp(void *a, void *b)
494 struct thread *thread_a = a;
495 struct thread *thread_b = b;
497 long cmp = timeval_cmp(thread_a->u.sands, thread_b->u.sands);
507 thread_timer_update(void *node, int actual_position)
509 struct thread *thread = node;
511 thread->index = actual_position;
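/* Annotation: these two functions are the priority-queue callbacks for the
 * timer and background queues created below. thread_timer_cmp() orders
 * threads by their absolute deadline (u.sands), so the queue head is always
 * the timer that fires next; thread_timer_update() records a thread's
 * current slot in thread->index so thread_cancel() can later remove it with
 * pqueue_remove_at(). */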
514 /* Allocate new thread master. */
515 struct thread_master *
516 thread_master_create ()
518 struct thread_master *rv;
521 getrlimit(RLIMIT_NOFILE, &limit);
523 if (cpu_record == NULL)
525 = hash_create ((unsigned int (*) (void *))cpu_record_hash_key,
526 (int (*) (const void *, const void *))cpu_record_hash_cmp);
528 rv = XCALLOC (MTYPE_THREAD_MASTER, sizeof (struct thread_master));
534 rv->fd_limit = (int)limit.rlim_cur;
535 rv->read = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
536 if (rv->read == NULL)
538 XFREE (MTYPE_THREAD_MASTER, rv);
542 rv->write = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
543 if (rv->write == NULL)
545 XFREE (MTYPE_THREAD, rv->read);
546 XFREE (MTYPE_THREAD_MASTER, rv);
550 /* Initialize the timer queues */
551 rv->timer = pqueue_create();
552 rv->background = pqueue_create();
553 rv->timer->cmp = rv->background->cmp = thread_timer_cmp;
554 rv->timer->update = rv->background->update = thread_timer_update;
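/* Usage sketch (annotation, simplified, and assuming the usual thread_add_*
 * convenience macros from thread.h): a daemon typically creates one master,
 * registers its initial I/O and timer threads against it, and then loops:
 *
 *   struct thread_master *master = thread_master_create ();
 *   thread_add_read (master, accept_handler, NULL, listen_sock);
 *   thread_add_timer (master, periodic_scan, NULL, 60);
 *   thread_main (master);   // fetch and dispatch threads forever
 *
 * accept_handler, periodic_scan and listen_sock are hypothetical names. */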
559 /* Add a new thread to the list. */
561 thread_list_add (struct thread_list *list, struct thread *thread)
564 thread->prev = list->tail;
566 list->tail->next = thread;
573 /* Delete a thread from the list. */
574 static struct thread *
575 thread_list_delete (struct thread_list *list, struct thread *thread)
578 thread->next->prev = thread->prev;
580 list->tail = thread->prev;
582 thread->prev->next = thread->next;
584 list->head = thread->next;
585 thread->next = thread->prev = NULL;
591 thread_delete_fd (struct thread **thread_array, struct thread *thread)
593 thread_array[thread->u.fd] = NULL;
597 thread_add_fd (struct thread **thread_array, struct thread *thread)
599 thread_array[thread->u.fd] = thread;
602 /* Move thread to unuse list. */
604 thread_add_unuse (struct thread *thread)
606 thread->type = THREAD_UNUSED;
607   assert (thread != NULL && thread->master != NULL);
608 assert (thread->next == NULL);
609 assert (thread->prev == NULL);
610 thread_list_add (&thread->master->unuse, thread);
613 /* Free all unused threads. */
615 thread_list_free (struct thread_master *m, struct thread_list *list)
620 for (t = list->head; t; t = next)
623 XFREE (MTYPE_THREAD, t);
630 thread_array_free (struct thread_master *m, struct thread **thread_array)
635 for (index = 0; index < m->fd_limit; ++index)
637 t = thread_array[index];
640 thread_array[index] = NULL;
641 XFREE (MTYPE_THREAD, t);
645 XFREE (MTYPE_THREAD, thread_array);
649 thread_queue_free (struct thread_master *m, struct pqueue *queue)
653 for (i = 0; i < queue->size; i++)
654 XFREE(MTYPE_THREAD, queue->array[i]);
656 m->alloc -= queue->size;
657 pqueue_delete(queue);
660 /* Stop thread scheduler. */
662 thread_master_free (struct thread_master *m)
664 thread_array_free (m, m->read);
665 thread_array_free (m, m->write);
666 thread_queue_free (m, m->timer);
667 thread_list_free (m, &m->event);
668 thread_list_free (m, &m->ready);
669 thread_list_free (m, &m->unuse);
670 thread_queue_free (m, m->background);
672 XFREE (MTYPE_THREAD_MASTER, m);
676 hash_clean (cpu_record, cpu_record_hash_free);
677 hash_free (cpu_record);
682 /* Is the thread list empty? Returns 1 if so, 0 otherwise. */
684 thread_empty (struct thread_list *list)
686 return list->head ? 0 : 1;
689 /* Delete top of the list and return it. */
690 static struct thread *
691 thread_trim_head (struct thread_list *list)
693 if (!thread_empty (list))
694 return thread_list_delete (list, list->head);
698 /* Return remaining time in seconds. */
700 thread_timer_remain_second (struct thread *thread)
702 quagga_get_relative (NULL);
704 if (thread->u.sands.tv_sec - relative_time.tv_sec > 0)
705 return thread->u.sands.tv_sec - relative_time.tv_sec;
711 thread_timer_remain(struct thread *thread)
713 quagga_get_relative(NULL);
715 return timeval_subtract(thread->u.sands, relative_time);
718 #define debugargdef const char *funcname, const char *schedfrom, int fromln
719 #define debugargpass funcname, schedfrom, fromln
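/* Annotation: these macros carry debug information about where a thread was
 * scheduled from. The public thread_add_* wrappers in thread.h are assumed
 * to expand to the funcname_thread_add_* functions below, passing the
 * stringified callback name plus __FILE__ and __LINE__, roughly:
 *
 *   #define thread_add_read(m,f,a,v) \
 *     funcname_thread_add_read (m, f, a, v, #f, __FILE__, __LINE__)
 */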
721 /* Get new thread. */
722 static struct thread *
723 thread_get (struct thread_master *m, u_char type,
724 int (*func) (struct thread *), void *arg, debugargdef)
726 struct thread *thread = thread_trim_head (&m->unuse);
730 thread = XCALLOC (MTYPE_THREAD, sizeof (struct thread));
734 thread->add_type = type;
740 thread->funcname = funcname;
741 thread->schedfrom = schedfrom;
742 thread->schedfrom_line = fromln;
747 #define fd_copy_fd_set(X) (X)
750 fd_select (int size, thread_fd_set *read, thread_fd_set *write, thread_fd_set *except, struct timeval *t)
752 return(select(size, read, write, except, t));
756 fd_is_set (int fd, thread_fd_set *fdset)
758 return FD_ISSET (fd, fdset);
762 fd_clear_read_write (int fd, thread_fd_set *fdset)
764 if (!FD_ISSET (fd, fdset))
771 static struct thread *
772 funcname_thread_add_read_write (int dir, struct thread_master *m,
773 int (*func) (struct thread *), void *arg, int fd,
776 struct thread *thread = NULL;
777 thread_fd_set *fdset = NULL;
779 if (dir == THREAD_READ)
784 if (FD_ISSET (fd, fdset))
786 zlog (NULL, LOG_WARNING, "There is already %s fd [%d]",
787 	   (dir == THREAD_READ) ? "read" : "write", fd);
793 thread = thread_get (m, dir, func, arg, debugargpass);
795 if (dir == THREAD_READ)
796 thread_add_fd (m->read, thread);
798 thread_add_fd (m->write, thread);
803 /* Add new read thread. */
805 funcname_thread_add_read (struct thread_master *m,
806 int (*func) (struct thread *), void *arg, int fd,
809 return funcname_thread_add_read_write (THREAD_READ, m, func,
810 arg, fd, debugargpass);
813 /* Add new write thread. */
815 funcname_thread_add_write (struct thread_master *m,
816 int (*func) (struct thread *), void *arg, int fd,
819 return funcname_thread_add_read_write (THREAD_WRITE, m, func,
820 arg, fd, debugargpass);
823 static struct thread *
824 funcname_thread_add_timer_timeval (struct thread_master *m,
825 int (*func) (struct thread *),
828 struct timeval *time_relative,
831 struct thread *thread;
832 struct pqueue *queue;
833 struct timeval alarm_time;
837 assert (type == THREAD_TIMER || type == THREAD_BACKGROUND);
838 assert (time_relative);
840 queue = ((type == THREAD_TIMER) ? m->timer : m->background);
841 thread = thread_get (m, type, func, arg, debugargpass);
843 /* Do we need jitter here? */
844 quagga_get_relative (NULL);
845 alarm_time.tv_sec = relative_time.tv_sec + time_relative->tv_sec;
846 alarm_time.tv_usec = relative_time.tv_usec + time_relative->tv_usec;
847 thread->u.sands = timeval_adjust(alarm_time);
849 pqueue_enqueue(thread, queue);
854 /* Add timer event thread. */
856 funcname_thread_add_timer (struct thread_master *m,
857 int (*func) (struct thread *),
858 void *arg, long timer,
868 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg,
869 &trel, debugargpass);
872 /* Add timer event thread with "millisecond" resolution */
874 funcname_thread_add_timer_msec (struct thread_master *m,
875 int (*func) (struct thread *),
876 void *arg, long timer,
883 trel.tv_sec = timer / 1000;
884 trel.tv_usec = 1000*(timer % 1000);
886 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
887 arg, &trel, debugargpass);
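/* Illustrative example (annotation): a 2500 ms timer becomes
 * trel = { .tv_sec = 2500 / 1000 = 2, .tv_usec = 1000 * (2500 % 1000) = 500000 },
 * i.e. 2.5 seconds relative to "now". */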
890 /* Add timer event thread, deadline given as a relative struct timeval */
892 funcname_thread_add_timer_tv (struct thread_master *m,
893 int (*func) (struct thread *),
894 void *arg, struct timeval *tv,
897 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
898 arg, tv, debugargpass);
901 /* Add a background thread, with an optional millisec delay */
903 funcname_thread_add_background (struct thread_master *m,
904 int (*func) (struct thread *),
905 void *arg, long delay,
914 trel.tv_sec = delay / 1000;
915 trel.tv_usec = 1000*(delay % 1000);
923 return funcname_thread_add_timer_timeval (m, func, THREAD_BACKGROUND,
924 arg, &trel, debugargpass);
927 /* Add simple event thread. */
929 funcname_thread_add_event (struct thread_master *m,
930 int (*func) (struct thread *), void *arg, int val,
933 struct thread *thread;
937 thread = thread_get (m, THREAD_EVENT, func, arg, debugargpass);
939 thread_list_add (&m->event, thread);
944 /* Cancel thread from scheduler. */
946 thread_cancel (struct thread *thread)
948 struct thread_list *list = NULL;
949 struct pqueue *queue = NULL;
950 struct thread **thread_array = NULL;
952 switch (thread->type)
955 assert (fd_clear_read_write (thread->u.fd, &thread->master->readfd));
956 thread_array = thread->master->read;
959 assert (fd_clear_read_write (thread->u.fd, &thread->master->writefd));
960 thread_array = thread->master->write;
963 queue = thread->master->timer;
966 list = &thread->master->event;
969 list = &thread->master->ready;
971 case THREAD_BACKGROUND:
972 queue = thread->master->background;
981 assert(thread->index >= 0);
982 assert(thread == queue->array[thread->index]);
983 pqueue_remove_at(thread->index, queue);
987 thread_list_delete (list, thread);
989 else if (thread_array)
991 thread_delete_fd (thread_array, thread);
995 assert(!"Thread should be either in queue or list or array!");
998 thread_add_unuse (thread);
1001 /* Delete all events which have argument value arg. */
1003 thread_cancel_event (struct thread_master *m, void *arg)
1005 unsigned int ret = 0;
1006 struct thread *thread;
1008 thread = m->event.head;
1019 thread_list_delete (&m->event, t);
1020 thread_add_unuse (t);
1024 /* thread can be on the ready list too */
1025 thread = m->ready.head;
1036 thread_list_delete (&m->ready, t);
1037 thread_add_unuse (t);
1043 static struct timeval *
1044 thread_timer_wait (struct pqueue *queue, struct timeval *timer_val)
1048 struct thread *next_timer = queue->array[0];
1049 *timer_val = timeval_subtract (next_timer->u.sands, relative_time);
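/* Illustrative example (annotation): this computes how long select() may
 * sleep. If the earliest queued timer is due at relative time {10, 250000}
 * and relative_time is currently {10, 0}, *timer_val becomes {0, 250000},
 * i.e. select() waits at most a quarter of a second. */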
1056 thread_process_fds_helper (struct thread_master *m, struct thread *thread, thread_fd_set *fdset)
1058 thread_fd_set *mfdset = NULL;
1059 struct thread **thread_array;
1064 if (thread->type == THREAD_READ)
1066 mfdset = &m->readfd;
1067 thread_array = m->read;
1071 mfdset = &m->writefd;
1072 thread_array = m->write;
1075 if (fd_is_set (THREAD_FD (thread), fdset))
1077 fd_clear_read_write (THREAD_FD (thread), mfdset);
1078 thread_delete_fd (thread_array, thread);
1079 thread_list_add (&m->ready, thread);
1080 thread->type = THREAD_READY;
1087 thread_process_fds (struct thread_master *m, thread_fd_set *rset, thread_fd_set *wset, int num)
1089 int ready = 0, index;
1091 for (index = 0; index < m->fd_limit && ready < num; ++index)
1093 ready += thread_process_fds_helper (m, m->read[index], rset);
1094 ready += thread_process_fds_helper (m, m->write[index], wset);
1099 /* Add all timers that have popped to the ready list. */
1101 thread_timer_process (struct pqueue *queue, struct timeval *timenow)
1103 struct thread *thread;
1104 unsigned int ready = 0;
1108 thread = queue->array[0];
1109 if (timeval_cmp (*timenow, thread->u.sands) < 0)
1111 pqueue_dequeue(queue);
1112 thread->type = THREAD_READY;
1113 thread_list_add (&thread->master->ready, thread);
1119 /* process a list en masse, e.g. for event thread lists */
1121 thread_process (struct thread_list *list)
1123 struct thread *thread;
1124 struct thread *next;
1125 unsigned int ready = 0;
1127 for (thread = list->head; thread; thread = next)
1129 next = thread->next;
1130 thread_list_delete (list, thread);
1131 thread->type = THREAD_READY;
1132 thread_list_add (&thread->master->ready, thread);
1138 /* Fetch next ready thread. */
1139 static struct thread *
1140 thread_fetch (struct thread_master *m)
1142 struct thread *thread;
1143 thread_fd_set readfd;
1144 thread_fd_set writefd;
1145 thread_fd_set exceptfd;
1146 struct timeval timer_val = { .tv_sec = 0, .tv_usec = 0 };
1147 struct timeval timer_val_bg;
1148 struct timeval *timer_wait = &timer_val;
1149 struct timeval *timer_wait_bg;
1155 /* Signals pre-empt everything */
1156 quagga_sigevent_process ();
1158 /* Drain the ready queue of already scheduled jobs, before scheduling
1161 if ((thread = thread_trim_head (&m->ready)) != NULL)
1164 /* To be fair to all kinds of threads, and avoid starvation, we
1165 * need to be careful to consider all thread types for scheduling
1166    * in each quantum. I.e. we should not return early from here on.
1169   /* Normal events are the next highest priority. */
1170 thread_process (&m->event);
1172 /* Structure copy. */
1173 readfd = fd_copy_fd_set(m->readfd);
1174 writefd = fd_copy_fd_set(m->writefd);
1175 exceptfd = fd_copy_fd_set(m->exceptfd);
1177 /* Calculate select wait timer if nothing else to do */
1178 if (m->ready.count == 0)
1180 quagga_get_relative (NULL);
1181 timer_wait = thread_timer_wait (m->timer, &timer_val);
1182 timer_wait_bg = thread_timer_wait (m->background, &timer_val_bg);
1184 if (timer_wait_bg &&
1185 (!timer_wait || (timeval_cmp (*timer_wait, *timer_wait_bg) > 0)))
1186 timer_wait = timer_wait_bg;
1189 num = fd_select (FD_SETSIZE, &readfd, &writefd, &exceptfd, timer_wait);
1191 /* Signals should get quick treatment */
1195 continue; /* signal received - process it */
1196 zlog_warn ("select() error: %s", safe_strerror (errno));
1200 /* Check foreground timers. Historically, they have had higher
1201 priority than I/O threads, so let's push them onto the ready
1202 list in front of the I/O threads. */
1203 quagga_get_relative (NULL);
1204 thread_timer_process (m->timer, &relative_time);
1206 /* Got IO, process it */
1208 thread_process_fds (m, &readfd, &writefd, num);
1211 /* If any threads were made ready above (I/O or foreground timer),
1212 perhaps we should avoid adding background timers to the ready
1213      list at this time. If this code is uncommented, then background
1214 timer threads will not run unless there is nothing else to do. */
1215 if ((thread = thread_trim_head (&m->ready)) != NULL)
1219 /* Background timer/events, lowest priority */
1220 thread_timer_process (m->background, &relative_time);
1222 if ((thread = thread_trim_head (&m->ready)) != NULL)
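/* Annotation: taken together, thread_fetch() dispatches work in a fixed
 * priority order on each iteration: pending signals first, then anything
 * already on the ready list, then events, then expired foreground timers
 * and ready file descriptors, and background timers only last. */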
1228 thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, unsigned long *cputime)
1231 /* This is 'user + sys' time. */
1232 *cputime = timeval_elapsed (now->cpu.ru_utime, start->cpu.ru_utime) +
1233 timeval_elapsed (now->cpu.ru_stime, start->cpu.ru_stime);
1236 #endif /* HAVE_RUSAGE */
1237 return timeval_elapsed (now->real, start->real);
1240 /* We should aim to yield after THREAD_YIELD_TIME_SLOT milliseconds.
1241 Note: we are using real (wall clock) time for this calculation.
1242 It could be argued that CPU time may make more sense in certain
1243 contexts. The things to consider are whether the thread may have
1244 blocked (in which case wall time increases, but CPU time does not),
1245 or whether the system is heavily loaded with other processes competing
1246 for CPU time. On balance, wall clock time seems to make sense.
1247 Plus it has the added benefit that gettimeofday should be faster
1248 than calling getrusage. */
1250 thread_should_yield (struct thread *thread)
1252 quagga_get_relative (NULL);
1253 unsigned long t = timeval_elapsed(relative_time, thread->real);
1254 return ((t > THREAD_YIELD_TIME_SLOT) ? t : 0);
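/* Usage sketch (annotation, hypothetical names): a long-running callback can
 * use thread_should_yield() to cooperate with the scheduler by rescheduling
 * itself instead of hogging the CPU:
 *
 *   static int table_walker (struct thread *t)
 *   {
 *     while (work_remaining ())
 *       {
 *         do_a_little_work ();
 *         if (thread_should_yield (t))
 *           {
 *             thread_add_event (t->master, table_walker, t->arg, 0);
 *             return 0;
 *           }
 *       }
 *     return 0;
 *   }
 */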
1258 thread_getrusage (RUSAGE_T *r)
1260 quagga_get_relative (NULL);
1262 getrusage(RUSAGE_SELF, &(r->cpu));
1264 r->real = relative_time;
1266 #ifdef HAVE_CLOCK_MONOTONIC
1267 /* quagga_get_relative() only updates recent_time if gettimeofday
1268 * based, not when using CLOCK_MONOTONIC. As we export recent_time
1269 * and guarantee to update it before threads are run...
1271 quagga_gettimeofday(&recent_time);
1272 #endif /* HAVE_CLOCK_MONOTONIC */
1275 struct thread *thread_current = NULL;
1277 /* We check thread consumed time. If the system has getrusage, we'll
1278 use that to get in-depth stats on the performance of the thread in addition
1279 to wall clock time stats from gettimeofday.
1281 'Dummy' threads (e.g. see funcname_thread_execute) must have
1282 thread->master == NULL.
1286 thread_call (struct thread *thread)
1288 unsigned long realtime, cputime;
1289 RUSAGE_T before, after;
1291 /* Cache a pointer to the relevant cpu history thread, if the thread
1292 * does not have it yet.
1294 * Callers submitting 'dummy threads' hence must take care that
1295 * thread->cpu is NULL
1299 struct cpu_thread_history tmp;
1301 tmp.func = thread->func;
1302 tmp.funcname = thread->funcname;
1304 thread->hist = hash_get (cpu_record, &tmp,
1305 (void * (*) (void *))cpu_record_hash_alloc);
1308 GETRUSAGE (&before);
1309 thread->real = before.real;
1311 thread_current = thread;
1312 (*thread->func) (thread);
1313 thread_current = NULL;
1317 realtime = thread_consumed_time (&after, &before, &cputime);
1318 thread->hist->real.total += realtime;
1319 if (thread->hist->real.max < realtime)
1320 thread->hist->real.max = realtime;
1322 thread->hist->cpu.total += cputime;
1323 if (thread->hist->cpu.max < cputime)
1324 thread->hist->cpu.max = cputime;
1327 ++(thread->hist->total_calls);
1328 thread->hist->types |= (1 << thread->add_type);
1330 #ifdef CONSUMED_TIME_CHECK
1331 if (realtime > CONSUMED_TIME_CHECK)
1334 * We have a CPU Hog on our hands.
1335 * Whinge about it now, so we're aware this is yet another task
1338 zlog_warn ("SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
1340 (unsigned long) thread->func,
1341 realtime/1000, cputime/1000);
1343 #endif /* CONSUMED_TIME_CHECK */
1347 thread_add_unuse (thread);
1350 /* Execute thread */
1352 funcname_thread_execute (struct thread_master *m,
1353 int (*func)(struct thread *),
1358 struct thread dummy;
1360 memset (&dummy, 0, sizeof (struct thread));
1362 dummy.type = THREAD_EVENT;
1363 dummy.add_type = THREAD_EXECUTE;
1364 dummy.master = NULL;
1369 dummy.funcname = funcname;
1370 dummy.schedfrom = schedfrom;
1371 dummy.schedfrom_line = fromln;
1373 thread_call (&dummy);
1378 /* Co-operative thread main loop */
1380 thread_main (struct thread_master *master)
1383 while ((t = thread_fetch (master)))
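/* Usage note (annotation): thread_fetch() blocks until some thread is ready
 * and returns it; the cooperative main loop then simply dispatches each one
 * in turn, a minimal sketch being:
 *
 *     while ((t = thread_fetch (master)))
 *       thread_call (t);
 */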