
/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both of poll and epoll
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both of poll and kqueue
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif
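
/*
 * Backend selection happens at build time: defining exactly one of
 * CONFIG_ELOOP_POLL, CONFIG_ELOOP_EPOLL, or CONFIG_ELOOP_KQUEUE (e.g.,
 * CFLAGS += -DCONFIG_ELOOP_EPOLL in the build configuration) picks that
 * backend; if none is defined, the select() backend is used as the
 * portable default per the conditionals above.
 */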

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	int kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;

#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}

static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */

int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
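
/*
 * Typical lifecycle, as a minimal sketch (the handler names here are
 * hypothetical, not part of eloop):
 *
 *	if (eloop_init() < 0)
 *		return -1;
 *	eloop_register_read_sock(sock, my_read_cb, NULL, NULL);
 *	eloop_register_timeout(10, 0, my_timeout_cb, NULL, NULL);
 *	eloop_run();
 *	eloop_destroy();
 */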

#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	int filter;
	struct kevent ke;

	switch (type) {
	case EVENT_TYPE_READ:
		filter = EVFILT_READ;
		break;
	case EVENT_TYPE_WRITE:
		filter = EVFILT_WRITE;
		break;
	default:
		filter = 0;
	}
	EV_SET(&ke, sock, filter, EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_KQUEUE */

static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = eloop.max_fd == 0 ? 16 : eloop.max_fd * 2;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

	return 0;
}

static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}

static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}

static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}

static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}

static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */

#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}

static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	int i, r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}
#endif /* CONFIG_ELOOP_KQUEUE */

int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
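
/*
 * A note on eloop_sock_requeue(): kqueue descriptors are not inherited
 * across fork(), so a forked child that keeps using the event loop needs
 * to call this to rebuild the kernel-side state. A minimal sketch (the
 * fork handling around it is hypothetical):
 *
 *	pid_t pid = fork();
 *	if (pid == 0 && eloop_sock_requeue() < 0)
 *		exit(1);
 *
 * With the other backends this is a no-op that returns 0.
 */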

static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;

		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}

int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}

void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}

static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}

int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}

void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
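
/*
 * Socket registration usage, as a minimal sketch. The callback signature
 * comes from eloop_sock_handler; the handler body and socket here are
 * hypothetical:
 *
 *	static void my_read_cb(int sock, void *eloop_ctx, void *sock_ctx)
 *	{
 *		char buf[256];
 *		recv(sock, buf, sizeof(buf), 0);
 *	}
 *
 *	eloop_register_sock(sock, EVENT_TYPE_READ, my_read_cb, NULL, NULL);
 *	...
 *	eloop_unregister_sock(sock, EVENT_TYPE_READ);
 */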

int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long is
		 * effectively infinite, i.e., it would never trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
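
/*
 * Timeout usage, as a minimal sketch (the handler name and periodic
 * re-arming are hypothetical). The callback signature comes from
 * eloop_timeout_handler:
 *
 *	static void my_timeout_cb(void *eloop_ctx, void *user_ctx)
 *	{
 *		// Re-arm for a periodic 5-second tick
 *		eloop_register_timeout(5, 0, my_timeout_cb, eloop_ctx,
 *				       user_ctx);
 *	}
 *
 *	eloop_register_timeout(5, 0, my_timeout_cb, NULL, NULL);
 */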

static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}

int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}

int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}

	return removed;
}

int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}

int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}

int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
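
/*
 * Deplete vs. replenish, illustrated with a hypothetical timeout that
 * currently has 10 s remaining:
 *
 *	// Shortens the deadline: 10 s remaining -> rescheduled to 3 s
 *	eloop_deplete_timeout(3, 0, my_timeout_cb, NULL, NULL);
 *
 *	// Extends the deadline: 10 s remaining -> rescheduled to 30 s
 *	eloop_replenish_timeout(30, 0, my_timeout_cb, NULL, NULL);
 *
 * Both return 1 if the timeout was rescheduled, 0 if the existing
 * deadline already satisfied the request, and -1 if no matching timeout
 * was found.
 */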

#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */

static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}

static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}

int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}

int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}

int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
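
/*
 * Signal usage, as a minimal sketch: the raw signal handler above only
 * sets flags, and the registered callback runs later from eloop_run()
 * in normal (non-signal) context. The handler name is hypothetical:
 *
 *	static void my_terminate_cb(int sig, void *signal_ctx)
 *	{
 *		eloop_terminate();
 *	}
 *
 *	eloop_register_signal_terminate(my_terminate_cb, NULL);
 */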

void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}

void eloop_terminate(void)
{
	eloop.terminate = 1;
}

void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}

void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here, but it would require four system calls:
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close() for
	 * the epoll fd. select() performs better for this one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
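
/*
 * eloop_wait_for_read_sock() blocks outside the normal event loop until
 * the given socket is readable; the registered handler tables are not
 * consulted. A minimal sketch (the socket and buffer are hypothetical):
 *
 *	eloop_wait_for_read_sock(sock);
 *	recv(sock, buf, sizeof(buf), 0);
 */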

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */