userver: samples/testsuite-support/tests/test_metrics.py
samples/testsuite-support/tests/test_metrics.py
import pytest

import pytest_userver.client
import pytest_userver.metrics


async def test_basic(service_client, monitor_client):
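    # The sample handler behind /metrics increments the 'sample-metrics.foo'
    # metric on every request, so its value is positive after the call below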
    response = await service_client.get('/metrics')
    assert response.status_code == 200
    assert 'application/json' in response.headers['Content-Type']

    metric = await monitor_client.single_metric('sample-metrics.foo')
    assert metric.value > 0


# /// [uservice_oneshot sample]
@pytest.mark.uservice_oneshot
async def test_initial_metrics(service_client, monitor_client):
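    # The uservice_oneshot mark runs this test against a freshly started
    # service instance, so the metric still holds its initial value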
    metric = await monitor_client.single_metric('sample-metrics.foo')
    assert metric.value == 0
    # /// [uservice_oneshot sample]


# /// [metrics reset]
async def test_reset(
    service_client: pytest_userver.client.Client,
    monitor_client: pytest_userver.client.ClientMonitor,
):
    # Reset the service metrics by calling `ResetMetric` on every metric
    # that provides such a function
    await service_client.reset_metrics()

    # Retrieve the metric
    metric: pytest_userver.metrics.Metric = await monitor_client.single_metric(
        'sample-metrics.foo',
    )
    assert metric.value == 0
    assert not metric.labels
    # /// [metrics reset]

    response = await service_client.get('/metrics')
    assert response.status_code == 200
    assert 'application/json' in response.headers['Content-Type']

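    # The single /metrics request above incremented the counter exactly once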
    metric = await monitor_client.single_metric('sample-metrics.foo')
    assert metric.value == 1

    await service_client.reset_metrics()
    metric = await monitor_client.single_metric('sample-metrics.foo')
    assert metric.value == 0


# /// [metrics labels]
async def test_engine_metrics(
    service_client,
    monitor_client: pytest_userver.client.ClientMonitor,
):
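    # single_metric() returns the one metric matching the path and labels;
    # it fails the test if there is no match or more than one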
    metric: pytest_userver.metrics.Metric = await monitor_client.single_metric(
        'engine.task-processors.tasks.finished.v2',
        labels={'task_processor': 'main-task-processor'},
    )
    assert metric.value > 0
    assert metric.labels == {'task_processor': 'main-task-processor'}

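    # metrics() returns a MetricsSnapshot filtered by path prefix and labels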
    metrics_dict: pytest_userver.metrics.MetricsSnapshot = await monitor_client.metrics(
        prefix='http.',
        labels={'http_path': '/ping'},
    )

    assert metrics_dict
    assert 'http.handler.cancelled-by-deadline' in metrics_dict

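    # When several labeled instances share a path, value_at() needs the
    # exact label set to pick one of them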
    assert (
        metrics_dict.value_at(
            'http.handler.in-flight',
            labels={
                'http_path': '/ping',
                'http_handler': 'handler-ping',
                'version': '2',
            },
        )
        == 0
    )
    # /// [metrics labels]


# /// [metrics single_metric]
async def test_engine_tasks_alive_metric(
    service_client,
    monitor_client: pytest_userver.client.ClientMonitor,
):
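    # A running service always has alive tasks on the main task processor,
    # so the metric value is strictly positive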
    metric: pytest_userver.metrics.Metric = await monitor_client.single_metric(
        'engine.task-processors.tasks.alive',
        labels={'task_processor': 'main-task-processor'},
    )
    assert metric.value > 0
    assert metric.labels == {'task_processor': 'main-task-processor'}
    # /// [metrics single_metric]


# /// [metrics single_metric_optional]
async def test_some_optional_metric(
    service_client,
    monitor_client: pytest_userver.client.ClientMonitor,
):
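    # single_metric_optional() returns None instead of failing the test
    # when no matching metric exists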
    metric: pytest_userver.metrics.Metric | None = await monitor_client.single_metric_optional(
        'some.metric.error',
        labels={'task_processor': 'main-task-processor'},
    )
    assert metric is None or metric.value == 0
    # /// [metrics single_metric_optional]


# /// [metrics diff]
async def test_diff_metrics(
    service_client,
    monitor_client: pytest_userver.client.ClientMonitor,
):
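    # metrics_diff() captures metric values on entering and leaving the
    # `async with` block; the differ reports the difference between the two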
    async with monitor_client.metrics_diff(prefix='sample-metrics') as differ:
        # Do something that makes the service update its metrics
        response = await service_client.get('/metrics')
        assert response.status == 200

    # Check the diff of the 'sample-metrics.foo' metric; the path is relative
    # to the 'sample-metrics' prefix
    assert differ.value_at('foo') == 1
    # /// [metrics diff]


# /// [metrics metrics]
async def test_engine_logger_metrics(
    service_client,
    monitor_client: pytest_userver.client.ClientMonitor,
):
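    # A MetricsSnapshot works like a mapping: `in` checks that a path is
    # present, value_at() reads the value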
    metrics_dict: pytest_userver.metrics.MetricsSnapshot = await monitor_client.metrics(
        prefix='logger.',
        labels={'logger': 'default'},
    )

    assert metrics_dict
    assert 'logger.total' in metrics_dict
    assert metrics_dict.value_at('logger.total') > 0

    assert (
        metrics_dict.value_at(
            'logger.dropped',
            labels={
                'logger': 'default',
                'version': '2',
            },
        )
        == 0
    )
    # /// [metrics metrics]