  1. /*
  2. * Event loop based on select() loop
  3. * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
  4. *
  5. * This software may be distributed under the terms of the BSD license.
  6. * See README for more details.
  7. */
  8. #include "includes.h"
  9. #include <assert.h>
  10. #include "common.h"
  11. #include "trace.h"
  12. #include "list.h"
  13. #include "eloop.h"
  14. #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
  15. #error Do not define both of poll and epoll
  16. #endif
  17. #if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL)
  18. #define CONFIG_ELOOP_SELECT
  19. #endif
  20. #ifdef CONFIG_ELOOP_POLL
  21. #include <poll.h>
  22. #endif /* CONFIG_ELOOP_POLL */
  23. #ifdef CONFIG_ELOOP_EPOLL
  24. #include <sys/epoll.h>
  25. #endif /* CONFIG_ELOOP_EPOLL */
/*
 * One registered socket: the fd plus the callback and the two opaque
 * context pointers handed back to the handler when the fd is ready.
 */
struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
/*
 * One registered timeout, kept on eloop.timeout in order of increasing
 * absolute expiry time (struct os_reltime).
 */
struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;	/* absolute expiry time */
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
/*
 * One registered signal handler. 'signaled' is bumped from the raw
 * signal handler and consumed later in the main loop context.
 */
struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};
/*
 * Dynamic array of registered sockets for one event type. 'changed' is
 * set whenever the table is modified so dispatch loops can abort safely
 * (the table memory may have been reallocated under them).
 */
struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};
/*
 * Global state for the (single) event loop instance.
 */
struct eloop_data {
	int max_sock;		/* highest fd registered so far */
	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;	/* fd-indexed lookup into pollfds */
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;	/* capacity of epoll_events */
	int epoll_max_fd;		/* capacity of epoll_table */
	struct eloop_sock *epoll_table;	/* fd-indexed copies of entries */
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;
	struct dl_list timeout;		/* sorted list of eloop_timeout */
	int signal_count;
	struct eloop_signal *signals;
	int signaled;		/* any signal pending? (set in handler) */
	int pending_terminate;	/* SIGINT/SIGTERM seen, not yet handled */
	int terminate;		/* eloop_terminate() requested */
};

static struct eloop_data eloop;
  84. #ifdef WPA_TRACE
/* SIGSEGV handler (WPA_TRACE builds only): dump a backtrace, then abort. */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
/* Re-register trace references for every entry in a socket table. */
static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}
/*
 * Drop trace references for every entry in a socket table; called
 * before a realloc/memmove that may move the entries.
 */
static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}
  114. #else /* WPA_TRACE */
  115. #define eloop_trace_sock_add_ref(table) do { } while (0)
  116. #define eloop_trace_sock_remove_ref(table) do { } while (0)
  117. #endif /* WPA_TRACE */
/*
 * Initialize the global event loop state. Must be called before any
 * other eloop_*() function. Returns 0 on success, -1 on failure
 * (epoll_create1() error in CONFIG_ELOOP_EPOLL builds).
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef WPA_TRACE
	/* Catch crashes so a backtrace can be printed before aborting */
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
/*
 * Append a socket registration to the given table and (epoll builds)
 * register the fd with the kernel epoll set. Grows all auxiliary
 * arrays as needed. Returns 0 on success, -1 on allocation or
 * epoll_ctl() failure.
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct eloop_sock *temp_table;
	struct epoll_event ev, *temp_events;
	int next;
#endif /* CONFIG_ELOOP_EPOLL */
	struct eloop_sock *tmp;
	int new_max_sock;

	/* Track the highest fd ever registered (used for sizing/select) */
	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* Grow the fd -> pollfd lookup map with +50 entries of headroom */
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* Grow the pollfd array handed to poll() */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	/* Grow the fd-indexed shadow table (doubling; starts at 16) */
	if (new_max_sock >= eloop.epoll_max_fd) {
		next = eloop.epoll_max_fd == 0 ? 16 : eloop.epoll_max_fd * 2;
		temp_table = os_realloc_array(eloop.epoll_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.epoll_max_fd = next;
		eloop.epoll_table = temp_table;
	}

	/* Grow the epoll_wait() result buffer (doubling; starts at 8) */
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */

	/* Drop trace refs while realloc may move the table entries */
	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	/* Signal any in-progress dispatch loop that the table moved */
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#ifdef CONFIG_ELOOP_EPOLL
	os_memset(&ev, 0, sizeof(ev));
	switch (table->type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		/* NOTE(review): the entry was already appended to the table
		 * above, so returning -1 here leaves table/eloop.count out of
		 * sync with the kernel epoll set - confirm whether a rollback
		 * is intended. */
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
	os_memcpy(&eloop.epoll_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
	return 0;
}
/*
 * Remove a socket registration from the given table (no-op if the fd
 * is not found) and, in epoll builds, delete it from the kernel epoll
 * set and clear its shadow-table entry.
 */
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;		/* fd not registered in this table */

	eloop_trace_sock_remove_ref(table);
	/* Close the gap by shifting the tail entries down by one */
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	/* Signal any in-progress dispatch loop that the table changed */
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.epoll_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
}
  278. #ifdef CONFIG_ELOOP_POLL
  279. static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
  280. {
  281. if (fd < mx && fd >= 0)
  282. return pollfds_map[fd];
  283. return NULL;
  284. }
  285. static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
  286. struct eloop_sock_table *writers,
  287. struct eloop_sock_table *exceptions,
  288. struct pollfd *pollfds,
  289. struct pollfd **pollfds_map,
  290. int max_pollfd_map)
  291. {
  292. int i;
  293. int nxt = 0;
  294. int fd;
  295. struct pollfd *pfd;
  296. /* Clear pollfd lookup map. It will be re-populated below. */
  297. os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
  298. if (readers && readers->table) {
  299. for (i = 0; i < readers->count; i++) {
  300. fd = readers->table[i].sock;
  301. assert(fd >= 0 && fd < max_pollfd_map);
  302. pollfds[nxt].fd = fd;
  303. pollfds[nxt].events = POLLIN;
  304. pollfds[nxt].revents = 0;
  305. pollfds_map[fd] = &(pollfds[nxt]);
  306. nxt++;
  307. }
  308. }
  309. if (writers && writers->table) {
  310. for (i = 0; i < writers->count; i++) {
  311. /*
  312. * See if we already added this descriptor, update it
  313. * if so.
  314. */
  315. fd = writers->table[i].sock;
  316. assert(fd >= 0 && fd < max_pollfd_map);
  317. pfd = pollfds_map[fd];
  318. if (!pfd) {
  319. pfd = &(pollfds[nxt]);
  320. pfd->events = 0;
  321. pfd->fd = fd;
  322. pollfds[i].revents = 0;
  323. pollfds_map[fd] = pfd;
  324. nxt++;
  325. }
  326. pfd->events |= POLLOUT;
  327. }
  328. }
  329. /*
  330. * Exceptions are always checked when using poll, but I suppose it's
  331. * possible that someone registered a socket *only* for exception
  332. * handling. Set the POLLIN bit in this case.
  333. */
  334. if (exceptions && exceptions->table) {
  335. for (i = 0; i < exceptions->count; i++) {
  336. /*
  337. * See if we already added this descriptor, just use it
  338. * if so.
  339. */
  340. fd = exceptions->table[i].sock;
  341. assert(fd >= 0 && fd < max_pollfd_map);
  342. pfd = pollfds_map[fd];
  343. if (!pfd) {
  344. pfd = &(pollfds[nxt]);
  345. pfd->events = POLLIN;
  346. pfd->fd = fd;
  347. pollfds[i].revents = 0;
  348. pollfds_map[fd] = pfd;
  349. nxt++;
  350. }
  351. }
  352. }
  353. return nxt;
  354. }
/*
 * Invoke handlers for every table entry whose pollfd has one of the
 * requested revents bits set. Returns 1 (caller must stop: pollfds may
 * be stale) if a handler modified a socket table, otherwise 0.
 */
static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		/* Handler may have (un)registered sockets; table/pollfds
		 * can no longer be trusted */
		if (table->changed)
			return 1;
	}

	return 0;
}
/*
 * Dispatch poll() results: readers first (including error/hangup),
 * then writers, then exception-only sockets. Stops early if any
 * handler changed the tables, since the pollfds are then stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
  396. #endif /* CONFIG_ELOOP_POLL */
  397. #ifdef CONFIG_ELOOP_SELECT
  398. static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
  399. fd_set *fds)
  400. {
  401. int i;
  402. FD_ZERO(fds);
  403. if (table->table == NULL)
  404. return;
  405. for (i = 0; i < table->count; i++) {
  406. assert(table->table[i].sock >= 0);
  407. FD_SET(table->table[i].sock, fds);
  408. }
  409. }
/*
 * Invoke handlers for every table entry whose fd is set in @fds.
 * Aborts the scan if a handler modified the table, since the entries
 * (and the fd_set results) may then be stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}
  427. #endif /* CONFIG_ELOOP_SELECT */
  428. #ifdef CONFIG_ELOOP_EPOLL
/*
 * Dispatch epoll_wait() results by looking each reported fd up in the
 * fd-indexed shadow table. Stops if any handler changed a socket
 * table, since the remaining events may refer to stale registrations.
 */
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.epoll_table[events[i].data.fd];
		/* Entry may have been cleared by an earlier handler */
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
  445. #endif /* CONFIG_ELOOP_EPOLL */
/*
 * Free a socket table, logging every registration that was never
 * unregistered (helps find missing eloop_unregister_sock() calls).
 */
static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}
  466. int eloop_register_read_sock(int sock, eloop_sock_handler handler,
  467. void *eloop_data, void *user_data)
  468. {
  469. return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
  470. eloop_data, user_data);
  471. }
  472. void eloop_unregister_read_sock(int sock)
  473. {
  474. eloop_unregister_sock(sock, EVENT_TYPE_READ);
  475. }
  476. static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
  477. {
  478. switch (type) {
  479. case EVENT_TYPE_READ:
  480. return &eloop.readers;
  481. case EVENT_TYPE_WRITE:
  482. return &eloop.writers;
  483. case EVENT_TYPE_EXCEPTION:
  484. return &eloop.exceptions;
  485. }
  486. return NULL;
  487. }
  488. int eloop_register_sock(int sock, eloop_event_type type,
  489. eloop_sock_handler handler,
  490. void *eloop_data, void *user_data)
  491. {
  492. struct eloop_sock_table *table;
  493. assert(sock >= 0);
  494. table = eloop_get_sock_table(type);
  495. return eloop_sock_table_add_sock(table, sock, handler,
  496. eloop_data, user_data);
  497. }
  498. void eloop_unregister_sock(int sock, eloop_event_type type)
  499. {
  500. struct eloop_sock_table *table;
  501. table = eloop_get_sock_table(type);
  502. eloop_sock_table_remove_sock(table, sock);
  503. }
/*
 * Register a one-shot timeout @secs+@usecs from now. Returns 0 on
 * success (including the "timeout too far in the future" case, which
 * is silently dropped) and -1 on allocation/clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	/* Normalize usec into [0, 1000000) */
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
/* Unlink a timeout from the list, drop its trace refs, and free it. */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
/*
 * Cancel all timeouts matching handler and the given context pointers
 * (ELOOP_ALL_CTX acts as a wildcard). Returns the number removed.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
/*
 * Cancel the first timeout exactly matching handler and both context
 * pointers. Stores the time left until it would have fired in
 * @remaining (0 if already due). Returns 1 if one was removed, else 0.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}

	return removed;
}
/*
 * Return 1 if a timeout with exactly this handler and both context
 * pointers is currently registered, 0 otherwise.
 */
int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}
/*
 * Shorten an existing matching timeout so it fires no later than
 * req_secs+req_usecs from now. Returns 1 if rescheduled, 0 if the
 * existing timeout was already sooner, -1 if no match was found.
 */
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				/* Replace with the shorter timeout */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
/*
 * Extend an existing matching timeout so it fires no earlier than
 * req_secs+req_usecs from now. Returns 1 if rescheduled, 0 if the
 * existing timeout was already later, -1 if no match was found.
 */
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				/* Replace with the longer timeout */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
  666. #ifndef CONFIG_NATIVE_WINDOWS
/*
 * SIGALRM watchdog: fires if a SIGINT/SIGTERM could not be processed
 * within the two-second alarm armed in eloop_handle_signal(); forces
 * process exit to break out of a stuck busy loop.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
  676. #endif /* CONFIG_NATIVE_WINDOWS */
/*
 * Raw signal handler: records the signal for deferred processing in
 * eloop_process_pending_signals() (runs in signal context, so it only
 * sets flags). For SIGINT/SIGTERM it also arms a 2 s SIGALRM watchdog
 * in case the main loop cannot shut down cleanly.
 */
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}
/*
 * Run the registered handlers for any signals flagged since the last
 * call. Called from main loop context (not signal context), so the
 * handlers may safely use the full eloop API. Also disarms the
 * SIGALRM shutdown watchdog once a pending terminate is handled.
 */
static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}
  717. int eloop_register_signal(int sig, eloop_signal_handler handler,
  718. void *user_data)
  719. {
  720. struct eloop_signal *tmp;
  721. tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
  722. sizeof(struct eloop_signal));
  723. if (tmp == NULL)
  724. return -1;
  725. tmp[eloop.signal_count].sig = sig;
  726. tmp[eloop.signal_count].user_data = user_data;
  727. tmp[eloop.signal_count].handler = handler;
  728. tmp[eloop.signal_count].signaled = 0;
  729. eloop.signal_count++;
  730. eloop.signals = tmp;
  731. signal(sig, eloop_handle_signal);
  732. return 0;
  733. }
  734. int eloop_register_signal_terminate(eloop_signal_handler handler,
  735. void *user_data)
  736. {
  737. int ret = eloop_register_signal(SIGINT, handler, user_data);
  738. if (ret == 0)
  739. ret = eloop_register_signal(SIGTERM, handler, user_data);
  740. return ret;
  741. }
/*
 * Register a handler for SIGHUP (reconfiguration request). On native
 * Windows builds SIGHUP does not exist, so this is a successful no-op.
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
/*
 * Main event loop: waits on the configured backend (poll/select/epoll),
 * then dispatches pending signals, due timeouts, and ready sockets, in
 * that order. Runs until eloop_terminate() is called or nothing is
 * registered any more.
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	/* fd_set objects are heap-allocated to keep stack usage small */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	/* Keep running while there is any timeout or socket registered */
	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		/* Derive the wait time from the nearest timeout (list head;
		 * the list is kept sorted by expiry) */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
		/* EINTR (e.g., our own signals) is not a fatal error */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				/* Copy out the fields and free the entry
				 * before invoking the handler so that the
				 * handler may re-register the same timeout */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
	}

	/* Allow eloop_run() to be called again after a terminate */
	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}
/* Request the event loop to exit after the current iteration. */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
/*
 * Free all event loop resources. Logs every timeout and socket that
 * was still registered (helps catch missing cancel/unregister calls).
 * Call after eloop_run() has returned, as the final cleanup step.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Compute (and normalize) time left until expiry for the
		 * diagnostic message */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_table);
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
}
  938. int eloop_terminated(void)
  939. {
  940. return eloop.terminate || eloop.pending_terminate;
  941. }
/*
 * Block (outside the event loop) until @sock becomes readable.
 * No-op for negative fds.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We can use epoll() here. But epoll() requres 4 system calls.
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
	 * epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
}
  967. #ifdef CONFIG_ELOOP_SELECT
  968. #undef CONFIG_ELOOP_SELECT
  969. #endif /* CONFIG_ELOOP_SELECT */