/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2016 Andy Green <andy@warmcat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation:
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA
 */

#include "private-libwebsockets.h"

void
lws_feature_status_libuv(struct lws_context_creation_info *info)
{
	if (lws_check_opt(info->options, LWS_SERVER_OPTION_LIBUV))
		lwsl_notice("libuv support compiled in and enabled\n");
	else
		lwsl_notice("libuv support compiled in but disabled\n");
}

static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
	    , int status
#endif
)
{
	struct lws_context_per_thread *pt = lws_container_of(handle,
				struct lws_context_per_thread, uv_idle);

//	lwsl_debug("%s\n", __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_tsi(pt->context, -1, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
			/* yes... come back again later */
//			lwsl_debug("%s: done again\n", __func__);
			return;
	}

	/* there is nobody who needs service forcing, shut down idle */
	uv_idle_stop(handle);

	lwsl_debug("%s: done stop\n", __func__);
}

static void
lws_io_cb(uv_poll_t *watcher, int status, int revents)
{
	struct lws_io_watcher *lws_io = lws_container_of(watcher,
				struct lws_io_watcher, uv_watcher);
	struct lws *wsi = lws_container_of(lws_io, struct lws, w_read);
	struct lws_context *context = wsi->context;
	struct lws_pollfd eventfd;

#if defined(WIN32) || defined(_WIN32)
	eventfd.fd = watcher->socket;
#else
	eventfd.fd = watcher->io_watcher.fd;
#endif
	eventfd.events = 0;
	eventfd.revents = 0;

	if (status < 0) {
		/*
		 * At this point status will be a UV error, like UV_EBADF;
		 * we treat all errors as LWS_POLLHUP.
		 */
		/* you might want to return; instead of servicing the fd in some cases */
		if (status == UV_EAGAIN)
			return;

		eventfd.events |= LWS_POLLHUP;
		eventfd.revents |= LWS_POLLHUP;
	} else {
		if (revents & UV_READABLE) {
			eventfd.events |= LWS_POLLIN;
			eventfd.revents |= LWS_POLLIN;
		}
		if (revents & UV_WRITABLE) {
			eventfd.events |= LWS_POLLOUT;
			eventfd.revents |= LWS_POLLOUT;
		}
	}

	lws_service_fd(context, &eventfd);

	uv_idle_start(&context->pt[(int)wsi->tsi].uv_idle, lws_uv_idle);
}

LWS_VISIBLE void
lws_uv_sigint_cb(uv_signal_t *watcher, int signum)
{
	lwsl_err("internal signal handler caught signal %d\n", signum);
	lws_libuv_stop(watcher->data);
}

LWS_VISIBLE int
lws_uv_sigint_cfg(struct lws_context *context, int use_uv_sigint,
		  uv_signal_cb cb)
{
	context->use_ev_sigint = use_uv_sigint;
	if (cb)
		context->lws_uv_sigint_cb = cb;
	else
		context->lws_uv_sigint_cb = &lws_uv_sigint_cb;

	return 0;
}

static void
lws_uv_timeout_cb(uv_timer_t *timer
#if UV_VERSION_MAJOR == 0
		  , int status
#endif
)
{
	struct lws_context_per_thread *pt = lws_container_of(timer,
			struct lws_context_per_thread, uv_timeout_watcher);

	if (pt->context->requested_kill)
		return;

	lwsl_debug("%s\n", __func__);
	lws_service_fd_tsi(pt->context, NULL, pt->tid);
}

static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };

int
lws_uv_initvhost(struct lws_vhost *vh, struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!LWS_LIBUV_ENABLED(vh->context))
		return 0;
	if (!wsi)
		wsi = vh->lserv_wsi;
	if (!wsi)
		return 0;
	if (wsi->w_read.context)
		return 0;

	pt = &vh->context->pt[(int)wsi->tsi];
	if (!pt->io_loop_uv)
		return 0;

	wsi->w_read.context = vh->context;
	n = uv_poll_init_socket(pt->io_loop_uv,
				&wsi->w_read.uv_watcher, wsi->desc.sockfd);
	if (n) {
		lwsl_err("uv_poll_init failed %d, sockfd=%p\n",
			 n, (void *)(long)wsi->desc.sockfd);

		return -1;
	}

	lws_libuv_io(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}

/*
 * This needs to be called after vhosts have been defined.
 *
 * If later, after server start, another vhost is added, this must be
 * called again to bind the vhost.
 */
LWS_VISIBLE int
lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_vhost *vh = context->vhost_list;
	int status = 0, n, ns, first = 1;

	if (!pt->io_loop_uv) {
		if (!loop) {
			loop = lws_malloc(sizeof(*loop));
			if (!loop) {
				lwsl_err("OOM\n");
				return -1;
			}
#if UV_VERSION_MAJOR > 0
			uv_loop_init(loop);
#else
			lwsl_err("This libuv is too old to work...\n");
			return 1;
#endif
			pt->ev_loop_foreign = 0;
		} else {
			lwsl_notice(" Using foreign event loop...\n");
			pt->ev_loop_foreign = 1;
		}

		pt->io_loop_uv = loop;
		uv_idle_init(loop, &pt->uv_idle);

		ns = ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		if (pt->context->use_ev_sigint) {
			assert(ns <= ARRAY_SIZE(pt->signals));
			for (n = 0; n < ns; n++) {
				uv_signal_init(loop, &pt->signals[n]);
				pt->signals[n].data = pt->context;
				uv_signal_start(&pt->signals[n],
						context->lws_uv_sigint_cb,
						sigs[n]);
			}
		}
	} else
		first = 0;

	/*
	 * Initialize the accept wsi read watcher with all the listening
	 * sockets and register a callback for read operations.
	 *
	 * We have to do it here because the uv loop(s) are not
	 * initialized until after context creation.
	 */
	while (vh) {
		if (lws_uv_initvhost(vh, vh->lserv_wsi) == -1)
			return -1;
		vh = vh->vhost_next;
	}

	if (first) {
		uv_timer_init(pt->io_loop_uv, &pt->uv_timeout_watcher);
		uv_timer_start(&pt->uv_timeout_watcher, lws_uv_timeout_cb,
			       10, 1000);
	}

	return status;
}

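/*
 * Usage sketch (illustrative only, not part of this file and excluded from
 * the build by #if 0): how an application might drive lws from its own
 * "foreign" libuv loop using lws_uv_sigint_cfg(), lws_uv_initloop() and
 * lws_libuv_run() above, assuming the lws v2.x public API.  The port, the
 * "minimal" protocol name and callback_minimal() are placeholders; a real
 * application supplies its own protocols.
 */
#if 0
#include <libwebsockets.h>
#include <uv.h>
#include <string.h>

static int
callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
		 void *user, void *in, size_t len)
{
	return 0;	/* a real protocol handles its callback reasons here */
}

static const struct lws_protocols protocols[] = {
	{ "minimal", callback_minimal, 0, 0 },	/* placeholder protocol */
	{ NULL, NULL, 0, 0 }			/* terminator */
};

int main(void)
{
	struct lws_context_creation_info info;
	struct lws_context *context;
	uv_loop_t loop;

	memset(&info, 0, sizeof(info));
	info.port = 7681;			/* placeholder port */
	info.protocols = protocols;
	info.options = LWS_SERVER_OPTION_LIBUV;

	context = lws_create_context(&info);
	if (!context)
		return 1;

	uv_loop_init(&loop);

	/* keep the default internal SIGINT handling, bind the foreign loop */
	lws_uv_sigint_cfg(context, 1, NULL);
	if (lws_uv_initloop(context, &loop, 0))
		return 1;

	/* runs until lws_libuv_stop() is called, e.g. from the handler */
	lws_libuv_run(context, 0);

	lws_context_destroy(context);

	/* with a foreign loop, the application owns draining and closing it */
	while (uv_run(&loop, UV_RUN_NOWAIT))
		;
	uv_loop_close(&loop);

	return 0;
}
#endif
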
static void lws_uv_close_cb(uv_handle_t *handle)
{
	//lwsl_err("%s: handle %p\n", __func__, handle);
}

static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
{
	if (!uv_is_closing(handle))
		uv_close(handle, lws_uv_close_cb);
}

void
lws_libuv_destroyloop(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
//	struct lws_context *ctx;
	int m, budget = 100, ns;

	if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
		return;

	if (!pt->io_loop_uv)
		return;

	lwsl_notice("%s: closing signals + timers context %p\n", __func__,
		    context);

	if (context->use_ev_sigint) {
		uv_signal_stop(&pt->w_sigint.uv_watcher);

		ns = ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		for (m = 0; m < ns; m++) {
			uv_signal_stop(&pt->signals[m]);
			uv_close((uv_handle_t *)&pt->signals[m],
				 lws_uv_close_cb);
		}
	}

	uv_timer_stop(&pt->uv_timeout_watcher);
	uv_close((uv_handle_t *)&pt->uv_timeout_watcher, lws_uv_close_cb);

	uv_idle_stop(&pt->uv_idle);
	uv_close((uv_handle_t *)&pt->uv_idle, lws_uv_close_cb);

	if (pt->ev_loop_foreign)
		return;

	while (budget-- && uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
		;

	lwsl_notice("%s: closing all loop handles context %p\n", __func__,
		    context);

	uv_stop(pt->io_loop_uv);

	uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);

	while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
		;

#if UV_VERSION_MAJOR > 0
	m = uv_loop_close(pt->io_loop_uv);
	if (m == UV_EBUSY)
		lwsl_err("%s: uv_loop_close: UV_EBUSY\n", __func__);
#endif
	lws_free(pt->io_loop_uv);
}

void
lws_libuv_accept(struct lws *wsi, lws_sock_file_fd_type desc)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];

	if (!LWS_LIBUV_ENABLED(context))
		return;

	lwsl_debug("%s: new wsi %p\n", __func__, wsi);

	wsi->w_read.context = context;
	if (wsi->mode == LWSCM_RAW_FILEDESC)
		uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher,
			     (int)desc.filefd);
	else
		uv_poll_init_socket(pt->io_loop_uv, &wsi->w_read.uv_watcher,
				    desc.sockfd);
}

void
lws_libuv_io(struct lws *wsi, int flags)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
#if defined(WIN32) || defined(_WIN32)
	int current_events = wsi->w_read.uv_watcher.events &
			     (UV_READABLE | UV_WRITABLE);
#else
	int current_events = wsi->w_read.uv_watcher.io_watcher.pevents &
			     (UV_READABLE | UV_WRITABLE);
#endif
	struct lws_io_watcher *w = &wsi->w_read;

	if (!LWS_LIBUV_ENABLED(context))
		return;

	// lwsl_notice("%s: wsi: %p, flags:0x%x\n", __func__, wsi, flags);

	// w->context is set after the loop is initialized
	if (!pt->io_loop_uv || !w->context) {
		lwsl_info("%s: no io loop yet\n", __func__);
		return;
	}

	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_err("%s: assert: flags %d", __func__, flags);
		assert(0);
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(&w->uv_watcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(&w->uv_watcher);
		else
			uv_poll_start(&w->uv_watcher, current_events,
				      lws_io_cb);
	}
}

int
lws_libuv_init_fd_table(struct lws_context *context)
{
	int n;

	if (!LWS_LIBUV_ENABLED(context))
		return 0;

	for (n = 0; n < context->count_threads; n++)
		context->pt[n].w_sigint.context = context;

	return 1;
}

LWS_VISIBLE void
lws_libuv_run(const struct lws_context *context, int tsi)
{
	if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
		uv_run(context->pt[tsi].io_loop_uv, 0);
}

LWS_VISIBLE void
lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
{
	if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
		uv_stop(context->pt[tsi].io_loop_uv);
}

static void
lws_libuv_kill(const struct lws_context *context)
{
	int n;

	lwsl_notice("%s\n", __func__);

	for (n = 0; n < context->count_threads; n++)
		if (context->pt[n].io_loop_uv &&
		    LWS_LIBUV_ENABLED(context)) //&&
			//!context->pt[n].ev_loop_foreign)
			uv_stop(context->pt[n].io_loop_uv);
}

/*
 * This does not actually stop the event loop.  The reason is we have to
 * pass libuv handle closures through its event loop.  So this tries to
 * close all wsi and sets a flag; when all the wsi closures have been
 * finalized, we actually stop the libuv event loops.
 */
LWS_VISIBLE void
lws_libuv_stop(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	if (context->requested_kill)
		return;

	context->requested_kill = 1;

	m = context->count_threads;
	context->being_destroyed = 1;

	while (m--) {
		pt = &context->pt[m];

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi)
				continue;

			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
				/* no protocol close */);
			n--;
		}
	}

	lwsl_info("%s: feels everything closed\n", __func__);
	if (context->count_wsi_allocated == 0)
		lws_libuv_kill(context);
}

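/*
 * Usage sketch (illustrative only, excluded from the build by #if 0):
 * lws_uv_sigint_cfg() above accepts a custom uv_signal_cb.  As with the
 * internal lws_uv_sigint_cb(), watcher->data carries the lws_context
 * (set in lws_uv_initloop()), so an application handler can do its own
 * logging or cleanup and then hand over to lws_libuv_stop() for the
 * deferred shutdown described in the comment above.  my_signal_cb is a
 * hypothetical application function name.
 */
#if 0
static void
my_signal_cb(uv_signal_t *watcher, int signum)
{
	struct lws_context *context = watcher->data;

	lwsl_notice("application caught signal %d, shutting down\n", signum);

	/* begins the deferred shutdown; loops stop once all wsi are closed */
	lws_libuv_stop(context);
}

/* registered before lws_uv_initloop(), e.g.:
 *	lws_uv_sigint_cfg(context, 1, my_signal_cb);
 */
#endif
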
LWS_VISIBLE uv_loop_t *
lws_uv_getloop(struct lws_context *context, int tsi)
{
	if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
		return context->pt[tsi].io_loop_uv;

	return NULL;
}

static void
lws_libuv_closewsi(uv_handle_t *handle)
{
	struct lws *n = NULL, *wsi = (struct lws *)(((char *)handle) -
					  (char *)(&n->w_read.uv_watcher));
	struct lws_context *context = lws_get_context(wsi);
	int lspd = 0;

	if (wsi->mode == LWSCM_SERVER_LISTENER &&
	    wsi->context->deprecated) {
		lspd = 1;
		context->deprecation_pending_listen_close_count--;
		if (!context->deprecation_pending_listen_close_count)
			lspd = 2;
	}

	lws_close_free_wsi_final(wsi);

	if (lspd == 2 && context->deprecation_cb) {
		lwsl_notice("calling deprecation callback\n");
		context->deprecation_cb();
	}

	//lwsl_notice("%s: ctx %p: wsi left %d\n", __func__, context, context->count_wsi_allocated);

	if (context->requested_kill && context->count_wsi_allocated == 0)
		lws_libuv_kill(context);
}

void
lws_libuv_closehandle(struct lws *wsi)
{
	struct lws_context *context = lws_get_context(wsi);

	/* required to defer actual deletion until libuv has processed it */
	uv_close((uv_handle_t *)&wsi->w_read.uv_watcher, lws_libuv_closewsi);

	if (context->requested_kill && context->count_wsi_allocated == 0)
		lws_libuv_kill(context);
}

#if defined(LWS_WITH_PLUGINS) && (UV_VERSION_MAJOR > 0)

LWS_VISIBLE int
lws_plat_plugins_init(struct lws_context *context, const char * const *d)
{
	struct lws_plugin_capability lcaps;
	struct lws_plugin *plugin;
	lws_plugin_init_func initfunc;
	int m, ret = 0;
	void *v;
	uv_dirent_t dent;
	uv_fs_t req;
	char path[256];
	uv_loop_t loop;
	uv_lib_t lib;
	int pofs = 0;

#if defined(__MINGW32__) || !defined(WIN32)
	pofs = 3;
#endif

	lib.errmsg = NULL;
	lib.handle = NULL;

	uv_loop_init(&loop);

	lwsl_notice(" Plugins:\n");

	while (d && *d) {
		lwsl_notice(" Scanning %s\n", *d);
		m = uv_fs_scandir(&loop, &req, *d, 0, NULL);
		if (m < 1) {
			lwsl_err("Scandir on %s failed\n", *d);
			return 1;
		}

		while (uv_fs_scandir_next(&req, &dent) != UV_EOF) {
			if (strlen(dent.name) < 7)
				continue;

			lwsl_notice(" %s\n", dent.name);

			lws_snprintf(path, sizeof(path) - 1, "%s/%s",
				     *d, dent.name);
			if (uv_dlopen(path, &lib)) {
				uv_dlerror(&lib);
				lwsl_err("Error loading DSO: %s\n",
					 lib.errmsg);
				goto bail;
			}

			/* we could open it, can we get its init function? */
#if !defined(WIN32) && !defined(__MINGW32__)
			m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
					 dent.name + pofs /* snip lib... */);
			path[m - 3] = '\0'; /* snip the .so */
#else
			m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
					 dent.name + pofs);
			path[m - 4] = '\0'; /* snip the .dll */
#endif
			if (uv_dlsym(&lib, path, &v)) {
				uv_dlerror(&lib);
				lwsl_err("Failed to get %s on %s: %s", path,
					 dent.name, lib.errmsg);
				goto bail;
			}

			initfunc = (lws_plugin_init_func)v;
			lcaps.api_magic = LWS_PLUGIN_API_MAGIC;
			m = initfunc(context, &lcaps);
			if (m) {
				lwsl_err("Initializing %s failed %d\n",
					 dent.name, m);
				goto skip;
			}

			plugin = lws_malloc(sizeof(*plugin));
			if (!plugin) {
				lwsl_err("OOM\n");
				goto bail;
			}
			plugin->list = context->plugin_list;
			context->plugin_list = plugin;
			strncpy(plugin->name, dent.name,
				sizeof(plugin->name) - 1);
			plugin->name[sizeof(plugin->name) - 1] = '\0';
			plugin->lib = lib;
			plugin->caps = lcaps;
			context->plugin_protocol_count += lcaps.count_protocols;
			context->plugin_extension_count += lcaps.count_extensions;

			continue;

skip:
			uv_dlclose(&lib);
		}
bail:
		uv_fs_req_cleanup(&req);
		d++;
	}

	uv_run(&loop, UV_RUN_NOWAIT);
	uv_loop_close(&loop);

	return ret;
}

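/*
 * Plugin entry-point sketch (illustrative only, excluded from the build by
 * #if 0): the loader above derives the entry points from the DSO filename --
 * for a plugin built as libprotocol_example.so it strips the "lib" prefix and
 * the ".so"/".dll" suffix and looks up init_protocol_example() and
 * destroy_protocol_example().  The plugin fills in the capability struct it
 * is handed, as suggested below.  "protocol_example", callback_example() and
 * the protocols table are hypothetical; the capability field names follow
 * struct lws_plugin_capability as used by lws 2.x plugins.
 */
#if 0
/* in the plugin source, built as libprotocol_example.so (or .dll on win32) */

static int
callback_example(struct lws *wsi, enum lws_callback_reasons reason,
		 void *user, void *in, size_t len)
{
	return 0;	/* a real plugin handles its callback reasons here */
}

static const struct lws_protocols protocols[] = {
	{ "protocol-example", callback_example, 0, 0 },
};

LWS_VISIBLE int
init_protocol_example(struct lws_context *context,
		      struct lws_plugin_capability *c)
{
	if (c->api_magic != LWS_PLUGIN_API_MAGIC) {
		lwsl_err("Plugin API %d, library API %d\n",
			 LWS_PLUGIN_API_MAGIC, c->api_magic);
		return 1;
	}

	c->protocols = protocols;
	c->count_protocols = sizeof(protocols) / sizeof(protocols[0]);
	c->extensions = NULL;
	c->count_extensions = 0;

	return 0;
}

LWS_VISIBLE int
destroy_protocol_example(struct lws_context *context)
{
	return 0;
}
#endif
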
LWS_VISIBLE int
lws_plat_plugins_destroy(struct lws_context *context)
{
	struct lws_plugin *plugin = context->plugin_list, *p;
	lws_plugin_destroy_func func;
	char path[256];
	void *v;
	int m;
	int pofs = 0;

#if defined(__MINGW32__) || !defined(WIN32)
	pofs = 3;
#endif

	if (!plugin)
		return 0;

//	lwsl_notice("%s\n", __func__);

	while (plugin) {
		p = plugin;

#if !defined(WIN32) && !defined(__MINGW32__)
		m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s",
				 plugin->name + pofs);
		path[m - 3] = '\0';
#else
		m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s",
				 plugin->name + pofs);
		path[m - 4] = '\0';
#endif

		if (uv_dlsym(&plugin->lib, path, &v)) {
			uv_dlerror(&plugin->lib);
			lwsl_err("Failed to get %s on %s: %s", path,
				 plugin->name, plugin->lib.errmsg);
		} else {
			func = (lws_plugin_destroy_func)v;
			m = func(context);
			if (m)
				lwsl_err("Destroying %s failed %d\n",
					 plugin->name, m);
		}

		uv_dlclose(&p->lib);
		plugin = p->list;
		p->list = NULL;
		free(p);
	}

	context->plugin_list = NULL;

	return 0;
}
#endif