/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both of poll and epoll
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */


struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
#ifdef CONFIG_ELOOP_EPOLL
	eloop_event_type type;
#else /* CONFIG_ELOOP_EPOLL */
	int changed;
#endif /* CONFIG_ELOOP_EPOLL */
};

struct eloop_data {
	int max_sock;
	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	int epoll_max_fd;
	struct eloop_sock *epoll_table;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;
	struct dl_list timeout;
	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;
	int terminate;
};
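
/*
 * All event loop state lives in this single static instance, so one process
 * gets exactly one event loop. Nothing here takes a lock; the expectation is
 * that registration, cancellation and eloop_run() are all used from the same
 * thread.
 */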
static struct eloop_data eloop;

#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}

static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}

static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct eloop_sock *temp_table;
	struct epoll_event ev, *temp_events;
	int next;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;

		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	if (new_max_sock >= eloop.epoll_max_fd) {
		next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
		temp_table = os_realloc_array(eloop.epoll_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.epoll_max_fd = next;
		eloop.epoll_table = temp_table;
	}

	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
#ifndef CONFIG_ELOOP_EPOLL
	table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
	eloop_trace_sock_add_ref(table);

#ifdef CONFIG_ELOOP_EPOLL
	os_memset(&ev, 0, sizeof(ev));
	switch (table->type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
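	/*
	 * Note that eloop.epoll_table is indexed directly by the socket fd
	 * (not by table position), which is why it is grown above whenever a
	 * new fd reaches epoll_max_fd; the epoll dispatch code looks entries
	 * up via the fd reported in epoll_event.data.fd.
	 */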
	os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
	return 0;
}

static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;

	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
#ifndef CONFIG_ELOOP_EPOLL
	table->changed = 1;
#endif /* CONFIG_ELOOP_EPOLL */
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
}

#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}

static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}

static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}

static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}

static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */

#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.epoll_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
	}
}
#endif /* CONFIG_ELOOP_EPOLL */

static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;

		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}

int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}

void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}

static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}

int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}

void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
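
/*
 * Typical usage, sketched with a hypothetical handler name: a callback such
 * as
 *	static void my_read_cb(int sock, void *eloop_ctx, void *sock_ctx);
 * is hooked up with
 *	eloop_register_sock(fd, EVENT_TYPE_READ, my_read_cb, ctx, NULL);
 * (or eloop_register_read_sock() for the common read-only case) and removed
 * again with eloop_unregister_sock(fd, EVENT_TYPE_READ) before the fd is
 * closed.
 */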

int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume the requested timeout is long
		 * enough to be effectively infinite, i.e., it would never
		 * trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
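
/*
 * A quick usage sketch (hypothetical callback name):
 *	eloop_register_timeout(5, 0, my_timeout_cb, ctx, NULL);
 * arms my_timeout_cb(ctx, NULL) to run roughly five seconds from now, unless
 * it is cancelled first with eloop_cancel_timeout(my_timeout_cb, ctx, NULL).
 * Because the list above is kept sorted, eloop_run() only has to look at the
 * first entry to compute its poll/select/epoll timeout.
 */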

static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}

int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}

int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}

	return removed;
}

int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}
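
/*
 * The two helpers below adjust an already registered timeout that matches
 * (handler, eloop_data, user_data). eloop_deplete_timeout() reschedules it to
 * the requested interval only if that would make it fire sooner;
 * eloop_replenish_timeout() only if it would make it fire later. Both return
 * 1 when the timeout was rescheduled, 0 when it was left unchanged, and -1
 * when no matching timeout exists.
 */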

int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}

int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}

#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */

static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}
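
/*
 * Note that the signal handler above only bumps the signaled counters; the
 * registered user callbacks are invoked later, from
 * eloop_process_pending_signals() in the main loop, so they run in normal
 * (non-signal-handler) context.
 */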

static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}

int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}

int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}

int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}

void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
				   , strerror(errno));
			goto out;
		}
		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}
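
/*
 * Putting the pieces together, a caller typically does something like the
 * following (sketch only; error handling omitted):
 *	eloop_init();
 *	eloop_register_signal_terminate(my_terminate_cb, ctx);
 *	eloop_register_read_sock(fd, my_read_cb, ctx, NULL);
 *	eloop_run();
 *	eloop_destroy();
 * where my_terminate_cb/my_read_cb are hypothetical application callbacks.
 * eloop_run() keeps iterating until eloop_terminate() is called or no
 * timeouts and no registered sockets remain.
 */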

void eloop_terminate(void)
{
	eloop.terminate = 1;
}

void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_table);
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
}

int eloop_terminated(void)
{
	return eloop.terminate;
}

void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here as well, but that would require four
	 * system calls: epoll_create1(), epoll_ctl() for ADD, epoll_wait(),
	 * and close() for the epoll fd. select() performs better for this
	 * one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
}
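
/*
 * CONFIG_ELOOP_SELECT was defined locally near the top of this file whenever
 * neither CONFIG_ELOOP_POLL nor CONFIG_ELOOP_EPOLL was set; it is undefined
 * again here now that it is no longer needed.
 */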

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */