chore: revert tracking metrics_type
Signed-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>
aarnphm committed Sep 26, 2022
1 parent ab97dbe commit 4db6f76
Showing 3 changed files with 11 additions and 29 deletions.
6 changes: 0 additions & 6 deletions bentoml/_internal/utils/analytics/schemas.py
@@ -209,12 +209,6 @@ class ServeUpdateEvent(EventMeta):
             attr.validators.in_(["standalone", "api_server", "runner"]),
         )
     )
-    metrics_type: str = attr.field(
-        validator=attr.validators.and_(
-            attr.validators.instance_of(str),
-            attr.validators.in_(["not_available", "legacy", "current"]),
-        ),
-    )
     metrics: t.List[t.Any] = attr.field(factory=list)


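For reference, the reverted metrics_type field chained attrs validators to restrict its value. A minimal, hypothetical sketch (the Example class and values below are illustrative, not part of the BentoML schema) of how attr.validators.and_ combines instance_of and in_:

import attr


@attr.define
class Example:
    metrics_type: str = attr.field(
        validator=attr.validators.and_(
            attr.validators.instance_of(str),
            attr.validators.in_(["not_available", "legacy", "current"]),
        ),
    )


Example(metrics_type="legacy")  # accepted: a str inside the allowed set
try:
    Example(metrics_type="unknown")  # rejected by the in_ validator
except ValueError:
    pass
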
24 changes: 7 additions & 17 deletions bentoml/_internal/utils/analytics/usage_stats.py
@@ -184,7 +184,7 @@ def filter_metrics(
 def get_metrics_report(
     metrics_client: PrometheusClient,
     grpc: bool = False,
-) -> tuple[list[dict[str, str | float]], bool | None]:
+) -> list[dict[str, str | float]]:
     """
     Get Prometheus metrics reports from the metrics client. This will be used to determine tracking events.
     If the return metrics are legacy metrics, the metrics will have prefix BENTOML_, otherwise they will have prefix bentoml_
@@ -221,17 +221,12 @@ def get_metrics_report(
             lambda samples: [s for s in samples if "endpoint" in s.labels],
         ]
         # If metrics prefix is BENTOML_, this is legacy metrics
-        if metric_name.endswith("_request"):
-            if metric_name.startswith("BENTOML_"):
-                # This is the case where we have legacy metrics with
-                # newer metrics. We will ignore the newer metrics.
-                return filter_metrics(metric_samples, *_filters), True
-            else:
-                # This is the case where we only have newer metrics
-                assert metric_name.startswith("bentoml_")
-                return filter_metrics(metric_samples, *_filters), False
+        if metric_name.endswith("_request") and (
+            metric_name.startswith("bentoml_") or metric_name.startswith("BENTOML_")
+        ):
+            return filter_metrics(metric_samples, *_filters)

-    return [], None
+    return []


 @inject
@@ -262,10 +257,6 @@ def track_serve(
     def loop() -> t.NoReturn:  # type: ignore
         last_tracked_timestamp: datetime = serve_info.serve_started_timestamp
         while not stop_event.wait(tracking_interval):  # pragma: no cover
-            metrics_type = "not_available"
-            metrics, use_legacy_metrics = get_metrics_report(metrics_client, grpc=grpc)
-            if use_legacy_metrics is not None:
-                metrics_type = "legacy" if use_legacy_metrics else "current"
             now = datetime.now(timezone.utc)
             event_properties = ServeUpdateEvent(
                 serve_id=serve_info.serve_id,
@@ -274,8 +265,7 @@ def loop() -> t.NoReturn:  # type: ignore
                 component=component,
                 triggered_at=now,
                 duration_in_seconds=int((now - last_tracked_timestamp).total_seconds()),
-                metrics_type=metrics_type,
-                metrics=metrics,
+                metrics=get_metrics_report(metrics_client, grpc=grpc),
             )
             last_tracked_timestamp = now
             track(event_properties)
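
The net effect of the revert: get_metrics_report no longer reports whether the scraped counters use the legacy BENTOML_ prefix; it just returns the filtered samples, and the serve-tracking loop passes that list straight into ServeUpdateEvent. A minimal sketch (hypothetical metric names, not BentoML's API) of the merged prefix check now applied to each counter name:

def is_request_counter(metric_name: str) -> bool:
    # Keep request counters regardless of legacy (BENTOML_) or current (bentoml_) prefix.
    return metric_name.endswith("_request") and (
        metric_name.startswith("bentoml_") or metric_name.startswith("BENTOML_")
    )


assert is_request_counter("BENTOML_noop_request")           # legacy prefix
assert is_request_counter("bentoml_api_server_request")     # current prefix
assert not is_request_counter("bentoml_request_duration")   # not a request counter
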
10 changes: 4 additions & 6 deletions tests/unit/_internal/utils/test_analytics.py
@@ -193,10 +193,10 @@ def test_track_serve_init_no_bento(
 @pytest.mark.parametrize(
     "mock_output,expected",
     [
-        (b"", tuple([[], None])),
+        (b"", []),
         (
             b"""# HELP BENTOML_noop_request_total Multiprocess metric""",
-            tuple([[], None]),
+            [],
         ),
     ],
 )
@@ -250,7 +250,7 @@ def test_legacy_get_metrics_report(
"utf-8"
)
)
output, _ = analytics.usage_stats.get_metrics_report(mock_prometheus_client)
output = analytics.usage_stats.get_metrics_report(mock_prometheus_client)
assert {
"endpoint": "/predict",
"http_response_code": "200",
@@ -308,9 +308,7 @@ def test_get_metrics_report(
     mock_prometheus_client.text_string_to_metric_families.return_value = (
         generated_metrics
     )
-    output, _ = analytics.usage_stats.get_metrics_report(
-        mock_prometheus_client, grpc=grpc
-    )
+    output = analytics.usage_stats.get_metrics_report(mock_prometheus_client, grpc=grpc)
     if expected:
         assert expected in output

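As a quick illustration of the updated expectations above, a hedged sketch of the empty-report case, which now yields a plain list instead of a tuple. It uses unittest.mock directly rather than the test module's fixtures, and assumes that text_string_to_metric_families is the call get_metrics_report iterates over (as the mocked return value above suggests):

from unittest.mock import MagicMock

from bentoml._internal.utils.analytics import usage_stats  # module path as shown in the diff above

mock_client = MagicMock()
mock_client.text_string_to_metric_families.return_value = []  # no metric families scraped
assert usage_stats.get_metrics_report(mock_client) == []
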
