/* Copyright libuv project contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "task.h"
#include <string.h> /* memset */
25 
/* Multiplier that converts milliseconds to nanoseconds; uv_hrtime() and
 * uv_metrics_idle_time() both report nanoseconds. */
#define UV_NS_TO_MS 1000000

/* One open -> write -> close request chain, shared by the callbacks of the
 * metrics_info_check test. */
typedef struct {
  uv_fs_t open_req;
  uv_fs_t write_req;
  uv_fs_t close_req;
} fs_reqs_t;

/* metrics.events value recorded by the previous callback, so each later
 * callback can assert that the event count grew. */
static uint64_t last_events_count;
static char test_buf[] = "test-buffer\n";
static fs_reqs_t fs_reqs;
/* Counts completed threadpool callbacks in metrics_pool_events; set to -42
 * by fs_prepare_cb once at least 7 have been observed. */
static int pool_events_counter;
38 
39 
/* Timer callback: bumps the int counter stored in handle->data, then busy-
 * waits so the loop accrues measurable non-idle time. */
static void timer_spin_cb(uv_timer_t* handle) {
  uint64_t t;

  (*(int*) handle->data)++;
  t = uv_hrtime();
  /* Spin for 600 ms to keep the spin-loop time out of the delta check. */
  while (uv_hrtime() - t < 600 * UV_NS_TO_MS) { }
}
48 
49 
TEST_IMPL(metrics_idle_time)50 TEST_IMPL(metrics_idle_time) {
51 #if defined(__OpenBSD__)
52   RETURN_SKIP("Test does not currently work in OpenBSD");
53 #endif
54   const uint64_t timeout = 1000;
55   uv_timer_t timer;
56   uint64_t idle_time;
57   int cntr;
58 
59   cntr = 0;
60   timer.data = &cntr;
61 
62   ASSERT_OK(uv_loop_configure(uv_default_loop(), UV_METRICS_IDLE_TIME));
63   ASSERT_OK(uv_timer_init(uv_default_loop(), &timer));
64   ASSERT_OK(uv_timer_start(&timer, timer_spin_cb, timeout, 0));
65 
66   ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
67   ASSERT_GT(cntr, 0);
68 
69   idle_time = uv_metrics_idle_time(uv_default_loop());
70 
71   /* Permissive check that the idle time matches within the timeout ±500 ms. */
72   ASSERT_LE(idle_time, (timeout + 500) * UV_NS_TO_MS);
73   ASSERT_GE(idle_time, (timeout - 500) * UV_NS_TO_MS);
74 
75   MAKE_VALGRIND_HAPPY(uv_default_loop());
76   return 0;
77 }
78 
79 
metrics_routine_cb(void * arg)80 static void metrics_routine_cb(void* arg) {
81   const uint64_t timeout = 1000;
82   uv_loop_t loop;
83   uv_timer_t timer;
84   uint64_t idle_time;
85   int cntr;
86 
87   cntr = 0;
88   timer.data = &cntr;
89 
90   ASSERT_OK(uv_loop_init(&loop));
91   ASSERT_OK(uv_loop_configure(&loop, UV_METRICS_IDLE_TIME));
92   ASSERT_OK(uv_timer_init(&loop, &timer));
93   ASSERT_OK(uv_timer_start(&timer, timer_spin_cb, timeout, 0));
94 
95   ASSERT_OK(uv_run(&loop, UV_RUN_DEFAULT));
96   ASSERT_GT(cntr, 0);
97 
98   idle_time = uv_metrics_idle_time(&loop);
99 
100   /* Only checking that idle time is greater than the lower bound since there
101    * may have been thread contention, causing the event loop to be delayed in
102    * the idle phase longer than expected.
103    */
104   ASSERT_GE(idle_time, (timeout - 500) * UV_NS_TO_MS);
105 
106   close_loop(&loop);
107   ASSERT_OK(uv_loop_close(&loop));
108 }
109 
110 
TEST_IMPL(metrics_idle_time_thread)111 TEST_IMPL(metrics_idle_time_thread) {
112   uv_thread_t threads[5];
113   int i;
114 
115   for (i = 0; i < 5; i++) {
116     ASSERT_OK(uv_thread_create(&threads[i], metrics_routine_cb, NULL));
117   }
118 
119   for (i = 0; i < 5; i++) {
120     uv_thread_join(&threads[i]);
121   }
122 
123   return 0;
124 }
125 
126 
/* Minimal timer callback: just records that it ran via the counter stored in
 * handle->data. */
static void timer_noop_cb(uv_timer_t* handle) {
  int* run_count = handle->data;
  *run_count += 1;
}
130 
131 
TEST_IMPL(metrics_idle_time_zero)132 TEST_IMPL(metrics_idle_time_zero) {
133   uv_metrics_t metrics;
134   uv_timer_t timer;
135   int cntr;
136 
137   cntr = 0;
138   timer.data = &cntr;
139   ASSERT_OK(uv_loop_configure(uv_default_loop(), UV_METRICS_IDLE_TIME));
140   ASSERT_OK(uv_timer_init(uv_default_loop(), &timer));
141   ASSERT_OK(uv_timer_start(&timer, timer_noop_cb, 0, 0));
142 
143   ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
144 
145   ASSERT_GT(cntr, 0);
146   ASSERT_OK(uv_metrics_idle_time(uv_default_loop()));
147 
148   ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
149   ASSERT_UINT64_EQ(cntr, metrics.loop_count);
150 
151   MAKE_VALGRIND_HAPPY(uv_default_loop());
152   return 0;
153 }
154 
155 
/* Close completion: fires on the third loop iteration (open, write, close
 * each consume one turn) and the event count must keep growing. */
static void close_cb(uv_fs_t* req) {
  uv_metrics_t metrics;

  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
  ASSERT_UINT64_EQ(3, metrics.loop_count);
  ASSERT_UINT64_GT(metrics.events, last_events_count);

  last_events_count = metrics.events;
  uv_fs_req_cleanup(req);
}
166 
167 
/* Write completion: second loop iteration; verify the full buffer was
 * written, then chain into the close request. */
static void write_cb(uv_fs_t* req) {
  uv_metrics_t metrics;

  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
  ASSERT_UINT64_EQ(2, metrics.loop_count);
  ASSERT_UINT64_GT(metrics.events, last_events_count);
  ASSERT_EQ(req->result, sizeof(test_buf));

  last_events_count = metrics.events;
  uv_fs_req_cleanup(req);

  ASSERT_OK(uv_fs_close(uv_default_loop(),
                        &fs_reqs.close_req,
                        fs_reqs.open_req.result,
                        close_cb));
}
184 
185 
/* Open completion: first loop iteration; kick off the async write using the
 * freshly opened file descriptor (req->result). */
static void create_cb(uv_fs_t* req) {
  uv_metrics_t metrics;
  uv_buf_t buf;

  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
  /* Event count here is still 0 so not going to check. */
  ASSERT_UINT64_EQ(1, metrics.loop_count);
  ASSERT_GE(req->result, 0);

  last_events_count = metrics.events;
  uv_fs_req_cleanup(req);

  buf = uv_buf_init(test_buf, sizeof(test_buf));
  ASSERT_OK(uv_fs_write(uv_default_loop(),
                        &fs_reqs.write_req,
                        req->result,
                        &buf,
                        1,
                        0,
                        write_cb));
}
206 
207 
/* Runs on the very first loop turn: all metrics must still be zero, then the
 * open -> write -> close chain is started with an async open. */
static void prepare_cb(uv_prepare_t* handle) {
  uv_metrics_t metrics;

  /* One-shot: only the first iteration is interesting. */
  uv_prepare_stop(handle);

  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
  ASSERT_UINT64_EQ(0, metrics.loop_count);
  ASSERT_UINT64_EQ(0, metrics.events);

  ASSERT_OK(uv_fs_open(uv_default_loop(),
                       &fs_reqs.open_req,
                       "test_file",
                       UV_FS_O_WRONLY | UV_FS_O_CREAT,
                       S_IRUSR | S_IWUSR,
                       create_cb));
}
223 
224 
TEST_IMPL(metrics_info_check)225 TEST_IMPL(metrics_info_check) {
226   uv_fs_t unlink_req;
227   uv_prepare_t prepare;
228 
229   uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
230   uv_fs_req_cleanup(&unlink_req);
231 
232   ASSERT_OK(uv_prepare_init(uv_default_loop(), &prepare));
233   ASSERT_OK(uv_prepare_start(&prepare, prepare_cb));
234 
235   ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
236 
237   uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
238   uv_fs_req_cleanup(&unlink_req);
239 
240   MAKE_VALGRIND_HAPPY(uv_default_loop());
241   return 0;
242 }
243 
244 
/* Prepare callback for metrics_pool_events: on the first observed pool event
 * every event must already have been waiting; once 7 pool callbacks have run,
 * stop and mark completion with the sentinel value -42. */
static void fs_prepare_cb(uv_prepare_t* handle) {
  uv_metrics_t metrics;

  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));

  if (pool_events_counter == 1)
    ASSERT_EQ(metrics.events, metrics.events_waiting);

  if (pool_events_counter >= 7) {
    uv_prepare_stop(handle);
    pool_events_counter = -42;
  }
}
259 
260 
/* Stat completion: count the pool event and release the request. */
static void fs_stat_cb(uv_fs_t* req) {
  pool_events_counter++;
  uv_fs_req_cleanup(req);
}
265 
266 
/* Deliberately empty worker: only the after-work callback matters here. */
static void fs_work_cb(uv_work_t* req) {
  (void) req;
}
269 
270 
/* After-work completion: count the pool event and release the heap-allocated
 * work request queued by fs_write_cb. */
static void fs_after_work_cb(uv_work_t* req, int status) {
  (void) status;
  pool_events_counter++;
  free(req);
}
275 
276 
/* Write completion: queue two worker-pool jobs so the loop observes extra
 * pool events after the fs write finishes. */
static void fs_write_cb(uv_fs_t* req) {
  uv_work_t* work1;
  uv_work_t* work2;

  work1 = malloc(sizeof(*work1));
  work2 = malloc(sizeof(*work2));
  /* Fail loudly on OOM instead of handing uv_queue_work a NULL request. */
  ASSERT_NOT_NULL(work1);
  ASSERT_NOT_NULL(work2);

  pool_events_counter++;
  uv_fs_req_cleanup(req);

  ASSERT_OK(uv_queue_work(uv_default_loop(),
                          work1,
                          fs_work_cb,
                          fs_after_work_cb));
  ASSERT_OK(uv_queue_work(uv_default_loop(),
                          work2,
                          fs_work_cb,
                          fs_after_work_cb));
}
293 
294 
/* Random completion: only the fact that the pool event happened matters. */
static void fs_random_cb(uv_random_t* req, int status, void* buf, size_t len) {
  (void) req;
  (void) status;
  (void) buf;
  (void) len;
  pool_events_counter++;
}
298 
299 
/* getaddrinfo completion: count the pool event and free whatever result the
 * lookup produced (NULL-safe). */
static void fs_addrinfo_cb(uv_getaddrinfo_t* req,
                           int status,
                           struct addrinfo* res) {
  (void) status;
  (void) res;
  pool_events_counter++;
  uv_freeaddrinfo(req->addrinfo);
}
306 
307 
/* Exercise the events/events_waiting counters: queue a batch of threadpool-
 * backed requests (fs write, two stats, random, getaddrinfo), give them time
 * to complete before the loop runs, then verify the metrics once the loop
 * drains. */
TEST_IMPL(metrics_pool_events) {
  uv_buf_t iov;
  uv_fs_t open_req;
  uv_fs_t stat1_req;
  uv_fs_t stat2_req;
  uv_fs_t unlink_req;
  uv_fs_t write_req;
  uv_getaddrinfo_t addrinfo_req;
  uv_metrics_t metrics;
  uv_prepare_t prepare;
  uv_random_t random_req;
  int fd;
  char rdata;

  ASSERT_OK(uv_loop_configure(uv_default_loop(), UV_METRICS_IDLE_TIME));

  /* Remove any leftover file from a previous run; failure is fine. */
  uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
  uv_fs_req_cleanup(&unlink_req);

  /* fs_prepare_cb checks events_waiting on the first observed event and
   * finishes (pool_events_counter = -42) once 7 pool callbacks have run. */
  ASSERT_OK(uv_prepare_init(uv_default_loop(), &prepare));
  ASSERT_OK(uv_prepare_start(&prepare, fs_prepare_cb));

  pool_events_counter = 0;
  /* Synchronous open (NULL loop): the fd is needed for the async write. */
  fd = uv_fs_open(NULL,
                  &open_req, "test_file", UV_FS_O_WRONLY | UV_FS_O_CREAT,
                  S_IRUSR | S_IWUSR,
                  NULL);
  ASSERT_GT(fd, 0);
  uv_fs_req_cleanup(&open_req);

  iov = uv_buf_init(test_buf, sizeof(test_buf));
  /* fs_write_cb queues two extra work items, for 7 pool events in total:
   * write + 2 stats + random + getaddrinfo + 2 after-work callbacks. */
  ASSERT_OK(uv_fs_write(uv_default_loop(),
                        &write_req,
                        fd,
                        &iov,
                        1,
                        0,
                        fs_write_cb));
  ASSERT_OK(uv_fs_stat(uv_default_loop(),
                       &stat1_req,
                       "test_file",
                       fs_stat_cb));
  ASSERT_OK(uv_fs_stat(uv_default_loop(),
                       &stat2_req,
                       "test_file",
                       fs_stat_cb));
  ASSERT_OK(uv_random(uv_default_loop(),
                      &random_req,
                      &rdata,
                      1,
                      0,
                      fs_random_cb));
  /* ".invalid" is a reserved TLD, so the lookup fails fast but still goes
   * through the worker pool. */
  ASSERT_OK(uv_getaddrinfo(uv_default_loop(),
                           &addrinfo_req,
                           fs_addrinfo_cb,
                           "example.invalid",
                           NULL,
                           NULL));

  /* Sleep for a moment to hopefully force the events to complete before
   * entering the event loop. */
  uv_sleep(100);

  ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));

  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
  /* It's possible for uv__work_done() to execute one extra time even though the
   * QUEUE has already been cleared out. This has to do with the way we use an
   * uv_async to tell the event loop thread to process the worker pool QUEUE. */
  ASSERT_GE(metrics.events, 7);
  /* It's possible one of the other events also got stuck in the event queue, so
   * check GE instead of EQ. Reason for 4 instead of 5 is because the call to
   * uv_getaddrinfo() is racey and slow. So can't guarantee that it'll always
   * execute before sleep completes. */
  ASSERT_GE(metrics.events_waiting, 4);
  /* -42 proves fs_prepare_cb saw all 7 pool events and stopped the loop. */
  ASSERT_EQ(pool_events_counter, -42);

  uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
  uv_fs_req_cleanup(&unlink_req);

  MAKE_VALGRIND_HAPPY(uv_default_loop());
  return 0;
}
391