Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package
python-opentelemetry-exporter-otlp-proto-http for openSUSE:Factory checked in
at 2026-04-14 17:48:50
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing
/work/SRC/openSUSE:Factory/python-opentelemetry-exporter-otlp-proto-http (Old)
and
/work/SRC/openSUSE:Factory/.python-opentelemetry-exporter-otlp-proto-http.new.21863
(New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-opentelemetry-exporter-otlp-proto-http"
Tue Apr 14 17:48:50 2026 rev:13 rq:1346229 version:1.41.0
Changes:
--------
---
/work/SRC/openSUSE:Factory/python-opentelemetry-exporter-otlp-proto-http/python-opentelemetry-exporter-otlp-proto-http.changes
2026-03-30 18:33:48.721361219 +0200
+++
/work/SRC/openSUSE:Factory/.python-opentelemetry-exporter-otlp-proto-http.new.21863/python-opentelemetry-exporter-otlp-proto-http.changes
2026-04-14 17:49:15.461866399 +0200
@@ -1,0 +2,11 @@
+Sun Apr 12 17:37:10 UTC 2026 - Dirk Müller <[email protected]>
+
+- update to 1.41.0:
+ * Enabled the flake8-tidy-import plugins rules for the ruff
+ linter. These rules throw warnings for relative imports in
+ the modules.
+ * improve check-links ci job
+ * `opentelemetry-exporter-otlp-proto-http`: use consistent
+ protobuf for export request
+
+-------------------------------------------------------------------
Old:
----
opentelemetry_exporter_otlp_proto_http-1.40.0.tar.gz
New:
----
opentelemetry_exporter_otlp_proto_http-1.41.0.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-opentelemetry-exporter-otlp-proto-http.spec ++++++
--- /var/tmp/diff_new_pack.mjE0Z8/_old 2026-04-14 17:49:16.309901452 +0200
+++ /var/tmp/diff_new_pack.mjE0Z8/_new 2026-04-14 17:49:16.313901617 +0200
@@ -17,7 +17,7 @@
Name: python-opentelemetry-exporter-otlp-proto-http
-Version: 1.40.0
+Version: 1.41.0
Release: 0
Summary: OpenTelemetry Collector Protobuf over HTTP Exporter
License: Apache-2.0
@@ -27,25 +27,25 @@
BuildRequires: %{python_module pip}
BuildRequires: python-rpm-macros
# SECTION test requirements
-BuildRequires: %{python_module Deprecated >= 1.2.6}
BuildRequires: %{python_module googleapis-common-protos >= 1.52}
BuildRequires: %{python_module opentelemetry-api >= 1.15}
BuildRequires: %{python_module opentelemetry-exporter-otlp-proto-common =
%{version}}
BuildRequires: %{python_module opentelemetry-proto = %{version}}
-BuildRequires: %{python_module opentelemetry-sdk >= 1.23.0}
-BuildRequires: %{python_module opentelemetry-test-utils = 0.61b0}
+BuildRequires: %{python_module opentelemetry-sdk >= %{version}}
+BuildRequires: %{python_module opentelemetry-test-utils = 0.62b0}
BuildRequires: %{python_module pytest}
BuildRequires: %{python_module requests >= 2.7}
BuildRequires: %{python_module responses >= 0.22.0}
+BuildRequires: %{python_module typing-extensions >= 4.5.0}
# /SECTION
BuildRequires: fdupes
-Requires: python-Deprecated >= 1.2.6
Requires: python-googleapis-common-protos >= 1.52
Requires: python-opentelemetry-api >= 1.15
Requires: python-opentelemetry-exporter-otlp-proto-common = %{version}
Requires: python-opentelemetry-proto = %{version}
-Requires: python-opentelemetry-sdk >= 1.23.0
+Requires: python-opentelemetry-sdk >= %{version}
Requires: python-requests >= 2.7
+Requires: python-typing-extensions >= 4.5.0
BuildArch: noarch
%python_subpackages
++++++ opentelemetry_exporter_otlp_proto_http-1.40.0.tar.gz ->
opentelemetry_exporter_otlp_proto_http-1.41.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/opentelemetry_exporter_otlp_proto_http-1.40.0/PKG-INFO
new/opentelemetry_exporter_otlp_proto_http-1.41.0/PKG-INFO
--- old/opentelemetry_exporter_otlp_proto_http-1.40.0/PKG-INFO 2020-02-02
01:00:00.000000000 +0100
+++ new/opentelemetry_exporter_otlp_proto_http-1.41.0/PKG-INFO 2020-02-02
01:00:00.000000000 +0100
@@ -1,6 +1,6 @@
Metadata-Version: 2.4
Name: opentelemetry-exporter-otlp-proto-http
-Version: 1.40.0
+Version: 1.41.0
Summary: OpenTelemetry Collector Protobuf over HTTP Exporter
Project-URL: Homepage,
https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-otlp-proto-http
Project-URL: Repository, https://github.com/open-telemetry/opentelemetry-python
@@ -22,9 +22,9 @@
Requires-Python: >=3.9
Requires-Dist: googleapis-common-protos~=1.52
Requires-Dist: opentelemetry-api~=1.15
-Requires-Dist: opentelemetry-exporter-otlp-proto-common==1.40.0
-Requires-Dist: opentelemetry-proto==1.40.0
-Requires-Dist: opentelemetry-sdk~=1.40.0
+Requires-Dist: opentelemetry-exporter-otlp-proto-common==1.41.0
+Requires-Dist: opentelemetry-proto==1.41.0
+Requires-Dist: opentelemetry-sdk~=1.41.0
Requires-Dist: requests~=2.7
Requires-Dist: typing-extensions>=4.5.0
Provides-Extra: gcp-auth
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/opentelemetry_exporter_otlp_proto_http-1.40.0/pyproject.toml
new/opentelemetry_exporter_otlp_proto_http-1.41.0/pyproject.toml
--- old/opentelemetry_exporter_otlp_proto_http-1.40.0/pyproject.toml
2020-02-02 01:00:00.000000000 +0100
+++ new/opentelemetry_exporter_otlp_proto_http-1.41.0/pyproject.toml
2020-02-02 01:00:00.000000000 +0100
@@ -29,9 +29,9 @@
dependencies = [
"googleapis-common-protos ~= 1.52",
"opentelemetry-api ~= 1.15",
- "opentelemetry-proto == 1.40.0",
- "opentelemetry-sdk ~= 1.40.0",
- "opentelemetry-exporter-otlp-proto-common == 1.40.0",
+ "opentelemetry-proto == 1.41.0",
+ "opentelemetry-sdk ~= 1.41.0",
+ "opentelemetry-exporter-otlp-proto-common == 1.41.0",
"requests ~= 2.7",
"typing-extensions >= 4.5.0",
]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/opentelemetry_exporter_otlp_proto_http-1.40.0/src/opentelemetry/exporter/otlp/proto/http/_common/__init__.py
new/opentelemetry_exporter_otlp_proto_http-1.41.0/src/opentelemetry/exporter/otlp/proto/http/_common/__init__.py
---
old/opentelemetry_exporter_otlp_proto_http-1.40.0/src/opentelemetry/exporter/otlp/proto/http/_common/__init__.py
2020-02-02 01:00:00.000000000 +0100
+++
new/opentelemetry_exporter_otlp_proto_http-1.41.0/src/opentelemetry/exporter/otlp/proto/http/_common/__init__.py
2020-02-02 01:00:00.000000000 +0100
@@ -19,9 +19,6 @@
from opentelemetry.sdk.environment_variables import (
_OTEL_PYTHON_EXPORTER_OTLP_HTTP_CREDENTIAL_PROVIDER,
- _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER,
- _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER,
- _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER,
)
from opentelemetry.util._importlib_metadata import entry_points
@@ -36,9 +33,9 @@
def _load_session_from_envvar(
cred_envvar: Literal[
- _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER,
- _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER,
- _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER,
+ "OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER",
+ "OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER",
+ "OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER",
],
) -> Optional[requests.Session]:
_credential_env = environ.get(
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/opentelemetry_exporter_otlp_proto_http-1.40.0/src/opentelemetry/exporter/otlp/proto/http/metric_exporter/__init__.py
new/opentelemetry_exporter_otlp_proto_http-1.41.0/src/opentelemetry/exporter/otlp/proto/http/metric_exporter/__init__.py
---
old/opentelemetry_exporter_otlp_proto_http-1.40.0/src/opentelemetry/exporter/otlp/proto/http/metric_exporter/__init__.py
2020-02-02 01:00:00.000000000 +0100
+++
new/opentelemetry_exporter_otlp_proto_http-1.41.0/src/opentelemetry/exporter/otlp/proto/http/metric_exporter/__init__.py
2020-02-02 01:00:00.000000000 +0100
@@ -24,10 +24,9 @@
Any,
Callable,
Dict,
+ Iterable,
List,
- Mapping,
Optional,
- Sequence,
)
import requests
@@ -51,7 +50,7 @@
_is_retryable,
_load_session_from_envvar,
)
-from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( #
noqa: F401
+from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (
ExportMetricsServiceRequest,
)
from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401
@@ -61,7 +60,7 @@
KeyValue,
KeyValueList,
)
-from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 # noqa: F401
+from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2
from opentelemetry.proto.resource.v1.resource_pb2 import Resource # noqa: F401
from opentelemetry.proto.resource.v1.resource_pb2 import (
Resource as PB2Resource,
@@ -122,7 +121,29 @@
preferred_temporality: dict[type, AggregationTemporality]
| None = None,
preferred_aggregation: dict[type, Aggregation] | None = None,
+ max_export_batch_size: int | None = None,
):
+ """OTLP HTTP metrics exporter
+
+ Args:
+ endpoint: Target URL to which the exporter is going to send metrics
+ certificate_file: Path to the certificate file to use for any TLS
+ client_key_file: Path to the client key file to use for any TLS
+ client_certificate_file: Path to the client certificate file to
use for any TLS
+ headers: Headers to be sent with HTTP requests at export
+ timeout: Timeout in seconds for export
+ compression: Compression to use; one of none, gzip, deflate
+ session: Requests session to use at export
+ preferred_temporality: Map of preferred temporality for each
metric type.
+ See `opentelemetry.sdk.metrics.export.MetricReader` for more
details on what
+ preferred temporality is.
+ preferred_aggregation: Map of preferred aggregation for each
metric type.
+ See `opentelemetry.sdk.metrics.export.MetricReader` for more
details on what
+ preferred aggregation is.
+ max_export_batch_size: Maximum number of data points to export in
a single request.
+ If not set there is no limit to the number of data points in a
request.
+ If it is set and the number of data points exceeds the max,
the request will be split.
+ """
self._shutdown_in_progress = threading.Event()
self._endpoint = endpoint or environ.get(
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT,
@@ -180,6 +201,7 @@
self._common_configuration(
preferred_temporality, preferred_aggregation
)
+ self._max_export_batch_size: int | None = max_export_batch_size
self._shutdown = False
def _export(
@@ -219,17 +241,21 @@
)
return resp
- def export(
+ def _export_with_retries(
self,
- metrics_data: MetricsData,
- timeout_millis: Optional[float] = 10000,
- **kwargs,
+ export_request: ExportMetricsServiceRequest,
+ deadline_sec: float,
) -> MetricExportResult:
- if self._shutdown:
- _logger.warning("Exporter already shutdown, ignoring batch")
- return MetricExportResult.FAILURE
- serialized_data = encode_metrics(metrics_data).SerializeToString()
- deadline_sec = time() + self._timeout
+ """Export serialized data with retry logic until success,
non-transient error, or exponential backoff maxed out.
+
+ Args:
+ export_request: ExportMetricsServiceRequest object containing
metrics data to export
+ deadline_sec: timestamp deadline for the export
+
+ Returns:
+ MetricExportResult: SUCCESS if export succeeded, FAILURE otherwise
+ """
+ serialized_data = export_request.SerializeToString()
for retry_num in range(_MAX_RETRYS):
# multiplying by a random number between .8 and 1.2 introduces a
+/20% jitter to each backoff.
backoff_seconds = 2**retry_num * random.uniform(0.8, 1.2)
@@ -263,6 +289,7 @@
"max retries or shutdown."
)
return MetricExportResult.FAILURE
+
_logger.warning(
"Transient error %s encountered while exporting metrics batch,
retrying in %.2fs.",
reason,
@@ -274,6 +301,39 @@
break
return MetricExportResult.FAILURE
+ def export(
+ self,
+ metrics_data: MetricsData,
+ timeout_millis: Optional[float] = 10000,
+ **kwargs,
+ ) -> MetricExportResult:
+ if self._shutdown:
+ _logger.warning("Exporter already shutdown, ignoring batch")
+ return MetricExportResult.FAILURE
+
+ export_request = encode_metrics(metrics_data)
+ deadline_sec = time() + self._timeout
+
+ # If no batch size configured, export as single batch with retries as
configured
+ if self._max_export_batch_size is None:
+ return self._export_with_retries(export_request, deadline_sec)
+
+ # Else, export in batches of configured size
+ batched_export_requests = _split_metrics_data(
+ export_request, self._max_export_batch_size
+ )
+
+ for split_metrics_data in batched_export_requests:
+ export_result = self._export_with_retries(
+ split_metrics_data,
+ deadline_sec,
+ )
+ if export_result != MetricExportResult.SUCCESS:
+ return MetricExportResult.FAILURE
+
+ # Only returns SUCCESS if all batches succeeded
+ return MetricExportResult.SUCCESS
+
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
if self._shutdown:
_logger.warning("Exporter already shutdown, ignoring call")
@@ -291,6 +351,315 @@
return True
+def _split_metrics_data(
+ metrics_data: ExportMetricsServiceRequest,
+ max_export_batch_size: int | None = None,
+) -> Iterable[ExportMetricsServiceRequest]:
+ """Splits metrics data into several ExportMetricsServiceRequest (copies
protobuf originals),
+ based on configured data point max export batch size.
+
+ Args:
+ metrics_data: metrics object based on HTTP protocol buffer definition
+
+ Returns:
+ Iterable[ExportMetricsServiceRequest]: An iterable of
ExportMetricsServiceRequest objects containing
+ ExportMetricsServiceRequest.ResourceMetrics,
ExportMetricsServiceRequest.ScopeMetrics, ExportMetricsServiceRequest.Metrics,
and data points
+ """
+ if not max_export_batch_size:
+ return metrics_data
+
+ batch_size: int = 0
+ # Stores split metrics data as editable references
+ # used to write batched pb2 objects for export when finalized
+ split_resource_metrics = []
+
+ for resource_metrics in metrics_data.resource_metrics:
+ split_scope_metrics = []
+ split_resource_metrics.append(
+ {
+ "resource": resource_metrics.resource,
+ "schema_url": resource_metrics.schema_url,
+ "scope_metrics": split_scope_metrics,
+ }
+ )
+
+ for scope_metrics in resource_metrics.scope_metrics:
+ split_metrics = []
+ split_scope_metrics.append(
+ {
+ "scope": scope_metrics.scope,
+ "schema_url": scope_metrics.schema_url,
+ "metrics": split_metrics,
+ }
+ )
+
+ for metric in scope_metrics.metrics:
+ split_data_points = []
+ field_name = metric.WhichOneof("data")
+ if not field_name:
+ _logger.warning(
+ "Tried to split and export an unsupported metric type.
Skipping."
+ )
+ continue
+
+ # Get data container using field name
+ # and build metric dictionary dynamically for conciseness
+ data_container = getattr(metric, field_name)
+ metric_dict = {
+ "name": metric.name,
+ "description": metric.description,
+ "unit": metric.unit,
+ field_name: {
+ "data_points": split_data_points,
+ },
+ }
+ if hasattr(data_container, "aggregation_temporality"):
+ metric_dict[field_name]["aggregation_temporality"] = (
+ data_container.aggregation_temporality
+ )
+ if hasattr(data_container, "is_monotonic"):
+ metric_dict[field_name]["is_monotonic"] = (
+ data_container.is_monotonic
+ )
+ split_metrics.append(metric_dict)
+
+ current_data_points = data_container.data_points
+ for data_point in current_data_points:
+ split_data_points.append(data_point)
+ batch_size += 1
+
+ if batch_size >= max_export_batch_size:
+ yield ExportMetricsServiceRequest(
+ resource_metrics=_get_split_resource_metrics_pb2(
+ split_resource_metrics
+ )
+ )
+
+ # Reset all the reference variables with current
metrics_data position
+ # minus yielded data_points. Need to clear data_points
and keep metric
+ # to avoid duplicate data_point export
+ batch_size = 0
+ split_data_points = []
+
+ # Rebuild metric dict generically using same approach
as initial creation
+ field_name = metric.WhichOneof("data")
+ if field_name is None:
+ _logger.warning(
+ "Tried to split and export an unsupported
metric type. Skipping."
+ )
+ continue
+ data_container = getattr(metric, field_name)
+ metric_dict = {
+ "name": metric.name,
+ "description": metric.description,
+ "unit": metric.unit,
+ field_name: {
+ "data_points": split_data_points,
+ },
+ }
+ if hasattr(data_container, "aggregation_temporality"):
+ metric_dict[field_name][
+ "aggregation_temporality"
+ ] = data_container.aggregation_temporality
+ if hasattr(data_container, "is_monotonic"):
+ metric_dict[field_name]["is_monotonic"] = (
+ data_container.is_monotonic
+ )
+
+ split_metrics = [metric_dict]
+ split_scope_metrics = [
+ {
+ "scope": scope_metrics.scope,
+ "schema_url": scope_metrics.schema_url,
+ "metrics": split_metrics,
+ }
+ ]
+ split_resource_metrics = [
+ {
+ "resource": resource_metrics.resource,
+ "schema_url": resource_metrics.schema_url,
+ "scope_metrics": split_scope_metrics,
+ }
+ ]
+
+ if not split_data_points:
+ # If data_points is empty remove the whole metric
+ split_metrics.pop()
+
+ if not split_metrics:
+ # If metrics is empty remove the whole scope_metrics
+ split_scope_metrics.pop()
+
+ if not split_scope_metrics:
+ # If scope_metrics is empty remove the whole resource_metrics
+ split_resource_metrics.pop()
+
+ if batch_size > 0:
+ yield ExportMetricsServiceRequest(
+ resource_metrics=_get_split_resource_metrics_pb2(
+ split_resource_metrics
+ )
+ )
+
+
+def _get_split_resource_metrics_pb2(
+ split_resource_metrics: List[Dict],
+) -> List[pb2.ResourceMetrics]:
+ """Helper that returns a list of pb2.ResourceMetrics objects based on
split_resource_metrics.
+ Example input:
+
+ ```python
+ [
+ {
+ "resource":
<opentelemetry.proto.resource.v1.resource_pb2.Resource>,
+ "schema_url": "http://foo-bar",
+ "scope_metrics": [
+ "scope": <opentelemetry.proto.common.v1.InstrumentationScope>,
+ "schema_url": "http://foo-baz",
+ "metrics": [
+ {
+ "name": "apples",
+ "description": "number of apples purchased",
+ "sum": {
+ "aggregation_temporality": 1,
+ "is_monotonic": "false",
+ "data_points": [
+ {
+ start_time_unix_nano: 1000
+ time_unix_nano: 1001
+ exemplars {
+ time_unix_nano: 1002
+ span_id: "foo-span"
+ trace_id: "foo-trace"
+ as_int: 5
+ }
+ as_int: 5
+ }
+ ]
+ }
+ },
+ ],
+ ],
+ },
+ ]
+ ```
+
+ Args:
+ split_resource_metrics: A list of dict representations of
ResourceMetrics,
+ ScopeMetrics, Metrics, and data points.
+
+ Returns:
+ List[pb2.ResourceMetrics]: A list of pb2.ResourceMetrics objects
containing
+ pb2.ScopeMetrics, pb2.Metrics, and data points
+ """
+ split_resource_metrics_pb = []
+ for resource_metrics in split_resource_metrics:
+ new_resource_metrics = pb2.ResourceMetrics(
+ resource=resource_metrics.get("resource"),
+ scope_metrics=[],
+ schema_url=resource_metrics.get("schema_url") or "",
+ )
+ for scope_metrics in resource_metrics.get("scope_metrics", []):
+ new_scope_metrics = pb2.ScopeMetrics(
+ scope=scope_metrics.get("scope"),
+ metrics=[],
+ schema_url=scope_metrics.get("schema_url") or "",
+ )
+
+ for metric in scope_metrics.get("metrics", []):
+ new_metric = None
+ data_points = []
+
+ if "sum" in metric:
+ new_metric = pb2.Metric(
+ name=metric.get("name"),
+ description=metric.get("description"),
+ unit=metric.get("unit"),
+ sum=pb2.Sum(
+ data_points=[],
+ aggregation_temporality=metric.get("sum").get(
+ "aggregation_temporality"
+ ),
+ is_monotonic=metric.get("sum").get("is_monotonic"),
+ ),
+ )
+ data_points = metric.get("sum").get("data_points")
+ elif "histogram" in metric:
+ new_metric = pb2.Metric(
+ name=metric.get("name"),
+ description=metric.get("description"),
+ unit=metric.get("unit"),
+ histogram=pb2.Histogram(
+ data_points=[],
+ aggregation_temporality=metric.get(
+ "histogram"
+ ).get("aggregation_temporality"),
+ ),
+ )
+ data_points = metric.get("histogram").get("data_points")
+ elif "exponential_histogram" in metric:
+ new_metric = pb2.Metric(
+ name=metric.get("name"),
+ description=metric.get("description"),
+ unit=metric.get("unit"),
+ exponential_histogram=pb2.ExponentialHistogram(
+ data_points=[],
+ aggregation_temporality=metric.get(
+ "exponential_histogram"
+ ).get("aggregation_temporality"),
+ ),
+ )
+ data_points = metric.get("exponential_histogram").get(
+ "data_points"
+ )
+ elif "gauge" in metric:
+ new_metric = pb2.Metric(
+ name=metric.get("name"),
+ description=metric.get("description"),
+ unit=metric.get("unit"),
+ gauge=pb2.Gauge(
+ data_points=[],
+ ),
+ )
+ data_points = metric.get("gauge").get("data_points")
+ elif "summary" in metric:
+ new_metric = pb2.Metric(
+ name=metric.get("name"),
+ description=metric.get("description"),
+ unit=metric.get("unit"),
+ summary=pb2.Summary(
+ data_points=[],
+ ),
+ )
+ data_points = metric.get("summary").get("data_points")
+ else:
+ _logger.warning(
+ "Tried to split and export an unsupported metric type.
Skipping."
+ )
+ continue
+
+ # Append data points generically using the field name from the
metric dict
+ for field_name in [
+ "sum",
+ "histogram",
+ "exponential_histogram",
+ "gauge",
+ "summary",
+ ]:
+ if field_name in metric:
+ metric_data_container = getattr(new_metric, field_name)
+ for data_point in data_points:
+ metric_data_container.data_points.append(
+ data_point
+ )
+ break
+
+ new_scope_metrics.metrics.append(new_metric)
+ new_resource_metrics.scope_metrics.append(new_scope_metrics)
+ split_resource_metrics_pb.append(new_resource_metrics)
+ return split_resource_metrics_pb
+
+
@deprecated(
"Use one of the encoders from opentelemetry-exporter-otlp-proto-common
instead. Deprecated since version 1.18.0.",
)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/opentelemetry_exporter_otlp_proto_http-1.40.0/src/opentelemetry/exporter/otlp/proto/http/version/__init__.py
new/opentelemetry_exporter_otlp_proto_http-1.41.0/src/opentelemetry/exporter/otlp/proto/http/version/__init__.py
---
old/opentelemetry_exporter_otlp_proto_http-1.40.0/src/opentelemetry/exporter/otlp/proto/http/version/__init__.py
2020-02-02 01:00:00.000000000 +0100
+++
new/opentelemetry_exporter_otlp_proto_http-1.41.0/src/opentelemetry/exporter/otlp/proto/http/version/__init__.py
2020-02-02 01:00:00.000000000 +0100
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-__version__ = "1.40.0"
+__version__ = "1.41.0"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/opentelemetry_exporter_otlp_proto_http-1.40.0/tests/metrics/test_otlp_metrics_exporter.py
new/opentelemetry_exporter_otlp_proto_http-1.41.0/tests/metrics/test_otlp_metrics_exporter.py
---
old/opentelemetry_exporter_otlp_proto_http-1.40.0/tests/metrics/test_otlp_metrics_exporter.py
2020-02-02 01:00:00.000000000 +0100
+++
new/opentelemetry_exporter_otlp_proto_http-1.41.0/tests/metrics/test_otlp_metrics_exporter.py
2020-02-02 01:00:00.000000000 +0100
@@ -12,10 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# pylint: disable=too-many-lines
import threading
import time
from logging import WARNING
from os import environ
+from typing import List
from unittest import TestCase
from unittest.mock import ANY, MagicMock, Mock, patch
@@ -34,8 +36,21 @@
DEFAULT_METRICS_EXPORT_PATH,
DEFAULT_TIMEOUT,
OTLPMetricExporter,
+ _get_split_resource_metrics_pb2,
+ _split_metrics_data,
)
from opentelemetry.exporter.otlp.proto.http.version import __version__
+from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (
+ ExportMetricsServiceRequest,
+)
+from opentelemetry.proto.common.v1.common_pb2 import (
+ InstrumentationScope,
+ KeyValue,
+)
+from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2
+from opentelemetry.proto.resource.v1.resource_pb2 import (
+ Resource as Pb2Resource,
+)
from opentelemetry.sdk.environment_variables import (
_OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER,
OTEL_EXPORTER_OTLP_CERTIFICATE,
@@ -91,6 +106,7 @@
# pylint: disable=protected-access
class TestOTLPMetricExporter(TestCase):
+ # pylint: disable=too-many-public-methods
def setUp(self):
self.metrics = {
"sum_int": MetricsData(
@@ -360,6 +376,670 @@
cert=exporter._client_cert,
)
+ def test_split_metrics_data_many_data_points(self):
+ metrics_data = ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=1,
+ metrics=[
+ _gauge(
+ index=1,
+ data_points=[
+ _number_data_point(11),
+ _number_data_point(12),
+ _number_data_point(13),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ )
+ split_metrics_data: List[ExportMetricsServiceRequest] = list(
+ # pylint: disable=protected-access
+ _split_metrics_data(
+ metrics_data=metrics_data,
+ max_export_batch_size=2,
+ )
+ )
+
+ self.assertEqual(
+ [
+ ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=1,
+ metrics=[
+ _gauge(
+ index=1,
+ data_points=[
+ _number_data_point(11),
+ _number_data_point(12),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ ),
+ ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=1,
+ metrics=[
+ _gauge(
+ index=1,
+ data_points=[
+ _number_data_point(13),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ ),
+ ],
+ split_metrics_data,
+ )
+
+ def test_split_metrics_data_nb_data_points_equal_batch_size(self):
+ metrics_data = ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=1,
+ metrics=[
+ _gauge(
+ index=1,
+ data_points=[
+ _number_data_point(11),
+ _number_data_point(12),
+ _number_data_point(13),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ )
+
+ split_metrics_data: List[ExportMetricsServiceRequest] = list(
+ # pylint: disable=protected-access
+ _split_metrics_data(
+ metrics_data=metrics_data,
+ max_export_batch_size=3,
+ )
+ )
+
+ self.assertEqual(
+ [
+ ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=1,
+ metrics=[
+ _gauge(
+ index=1,
+ data_points=[
+ _number_data_point(11),
+ _number_data_point(12),
+ _number_data_point(13),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ ),
+ ],
+ split_metrics_data,
+ )
+
+ def test_split_metrics_data_many_resources_scopes_metrics(self):
+ # GIVEN
+ metrics_data = ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=1,
+ metrics=[
+ _gauge(
+ index=1,
+ data_points=[
+ _number_data_point(11),
+ ],
+ ),
+ _gauge(
+ index=2,
+ data_points=[
+ _number_data_point(12),
+ ],
+ ),
+ ],
+ ),
+ _scope_metrics(
+ index=2,
+ metrics=[
+ _gauge(
+ index=3,
+ data_points=[
+ _number_data_point(13),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ _resource_metrics(
+ index=2,
+ scope_metrics=[
+ _scope_metrics(
+ index=3,
+ metrics=[
+ _gauge(
+ index=4,
+ data_points=[
+ _number_data_point(14),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ )
+
+ split_metrics_data: List[ExportMetricsServiceRequest] = list(
+ # pylint: disable=protected-access
+ _split_metrics_data(
+ metrics_data=metrics_data,
+ max_export_batch_size=2,
+ )
+ )
+
+ self.assertEqual(
+ [
+ ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=1,
+ metrics=[
+ _gauge(
+ index=1,
+ data_points=[
+ _number_data_point(11),
+ ],
+ ),
+ _gauge(
+ index=2,
+ data_points=[
+ _number_data_point(12),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ ),
+ ExportMetricsServiceRequest(
+ resource_metrics=[
+ _resource_metrics(
+ index=1,
+ scope_metrics=[
+ _scope_metrics(
+ index=2,
+ metrics=[
+ _gauge(
+ index=3,
+ data_points=[
+ _number_data_point(13),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ _resource_metrics(
+ index=2,
+ scope_metrics=[
+ _scope_metrics(
+ index=3,
+ metrics=[
+ _gauge(
+ index=4,
+ data_points=[
+ _number_data_point(14),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+ ),
+ ],
+ split_metrics_data,
+ )
+
+ def test_get_split_resource_metrics_pb2_one_of_each(self):
+ split_resource_metrics = [
+ {
+ "resource": Pb2Resource(
+ attributes=[
+ KeyValue(key="foo", value={"string_value": "bar"})
+ ],
+ ),
+ "schema_url": "http://foo-bar",
+ "scope_metrics": [
+ {
+ "scope": InstrumentationScope(
+ name="foo-scope", version="1.0.0"
+ ),
+ "schema_url": "http://foo-baz",
+ "metrics": [
+ {
+ "name": "foo-metric",
+ "description": "foo-description",
+ "unit": "foo-unit",
+ "sum": {
+ "aggregation_temporality": 1,
+ "is_monotonic": True,
+ "data_points": [
+ pb2.NumberDataPoint(
+ attributes=[
+ KeyValue(
+ key="dp_key",
+ value={
+ "string_value":
"dp_value"
+ },
+ )
+ ],
+ start_time_unix_nano=12345,
+ time_unix_nano=12350,
+ as_double=42.42,
+ )
+ ],
+ },
+ }
+ ],
+ }
+ ],
+ }
+ ]
+
+ result = _get_split_resource_metrics_pb2(split_resource_metrics)
+ self.assertEqual(len(result), 1)
+ self.assertIsInstance(result[0], pb2.ResourceMetrics)
+ self.assertEqual(result[0].schema_url, "http://foo-bar")
+ self.assertEqual(len(result[0].scope_metrics), 1)
+ self.assertEqual(result[0].scope_metrics[0].scope.name, "foo-scope")
+ self.assertEqual(len(result[0].scope_metrics[0].metrics), 1)
+ self.assertEqual(
+ result[0].scope_metrics[0].metrics[0].name, "foo-metric"
+ )
+ self.assertEqual(
+ result[0].scope_metrics[0].metrics[0].sum.is_monotonic, True
+ )
+
+ def test_get_split_resource_metrics_pb2_multiples(self):
+ split_resource_metrics = [
+ {
+ "resource": Pb2Resource(
+ attributes=[
+ KeyValue(key="foo1", value={"string_value": "bar2"})
+ ],
+ ),
+ "schema_url": "http://foo-bar-1",
+ "scope_metrics": [
+ {
+ "scope": InstrumentationScope(
+ name="foo-scope-1", version="1.0.0"
+ ),
+ "schema_url": "http://foo-baz-1",
+ "metrics": [
+ {
+ "name": "foo-metric-1",
+ "description": "foo-description-1",
+ "unit": "foo-unit-1",
+ "gauge": {
+ "data_points": [
+ pb2.NumberDataPoint(
+ attributes=[
+ KeyValue(
+ key="dp_key",
+ value={
+ "string_value":
"dp_value"
+ },
+ )
+ ],
+ start_time_unix_nano=12345,
+ time_unix_nano=12350,
+ as_double=42.42,
+ )
+ ],
+ },
+ }
+ ],
+ }
+ ],
+ },
+ {
+ "resource": Pb2Resource(
+ attributes=[
+ KeyValue(key="foo2", value={"string_value": "bar2"})
+ ],
+ ),
+ "schema_url": "http://foo-bar-2",
+ "scope_metrics": [
+ {
+ "scope": InstrumentationScope(
+ name="foo-scope-2", version="2.0.0"
+ ),
+ "schema_url": "http://foo-baz-2",
+ "metrics": [
+ {
+ "name": "foo-metric-2",
+ "description": "foo-description-2",
+ "unit": "foo-unit-2",
+ "histogram": {
+ "aggregation_temporality": 2,
+ "data_points": [
+ pb2.HistogramDataPoint(
+ attributes=[
+ KeyValue(
+ key="dp_key",
+ value={
+ "string_value":
"dp_value"
+ },
+ )
+ ],
+ start_time_unix_nano=12345,
+ time_unix_nano=12350,
+ )
+ ],
+ },
+ }
+ ],
+ }
+ ],
+ },
+ ]
+
+ result = _get_split_resource_metrics_pb2(split_resource_metrics)
+ self.assertEqual(len(result), 2)
+ self.assertEqual(result[0].schema_url, "http://foo-bar-1")
+ self.assertEqual(result[1].schema_url, "http://foo-bar-2")
+ self.assertEqual(len(result[0].scope_metrics), 1)
+ self.assertEqual(len(result[1].scope_metrics), 1)
+ self.assertEqual(result[0].scope_metrics[0].scope.name, "foo-scope-1")
+ self.assertEqual(result[1].scope_metrics[0].scope.name, "foo-scope-2")
+ self.assertEqual(
+ result[0].scope_metrics[0].metrics[0].name, "foo-metric-1"
+ )
+ self.assertEqual(
+ result[1].scope_metrics[0].metrics[0].name, "foo-metric-2"
+ )
+
+ def test_get_split_resource_metrics_pb2_unsupported_metric_type(self):
+ split_resource_metrics = [
+ {
+ "resource": Pb2Resource(
+ attributes=[
+ KeyValue(key="foo", value={"string_value": "bar"})
+ ],
+ ),
+ "schema_url": "http://foo-bar",
+ "scope_metrics": [
+ {
+ "scope": InstrumentationScope(
+ name="foo", version="1.0.0"
+ ),
+ "schema_url": "http://foo-baz",
+ "metrics": [
+ {
+ "name": "unsupported-metric",
+ "description": "foo-bar",
+ "unit": "foo-bar",
+ "unsupported_metric_type": {},
+ }
+ ],
+ }
+ ],
+ }
+ ]
+
+ with self.assertLogs(level="WARNING") as log:
+ result = _get_split_resource_metrics_pb2(split_resource_metrics)
+ self.assertEqual(len(result), 1)
+ self.assertIn(
+ "Tried to split and export an unsupported metric type",
+ log.output[0],
+ )
+
+ @staticmethod
+ def _create_metrics_data_multiple_data_points(
+ num_data_points: int,
+    ) -> MetricsData:
+        """Helper to create MetricsData with specified number of data points
+        for testing batch splitting."""
+ metrics = []
+ for idx in range(num_data_points):
+ metrics.append(_generate_sum(f"sum_int_{idx}", 33))
+
+ return MetricsData(
+ resource_metrics=[
+ ResourceMetrics(
+ resource=Resource(
+ attributes={"a": 1, "b": False},
+ schema_url="resource_schema_url",
+ ),
+ scope_metrics=[
+ ScopeMetrics(
+ scope=SDKInstrumentationScope(
+ name="first_name",
+ version="first_version",
+ schema_url="insrumentation_scope_schema_url",
+ ),
+ metrics=metrics,
+ schema_url="instrumentation_scope_schema_url",
+ )
+ ],
+ schema_url="resource_schema_url",
+ )
+ ]
+ )
+
+ @patch.object(Session, "post")
+ def test_export_max_export_batch_size_single_batch_integration(
+ self, mock_post
+ ):
+ resp = Response()
+ resp.status_code = 200
+ mock_post.return_value = resp
+
+ # 2 data points, batch size of 3: fits in one batch
+ metrics_data = (
+ TestOTLPMetricExporter._create_metrics_data_multiple_data_points(2)
+ )
+ exporter = OTLPMetricExporter(max_export_batch_size=3)
+ result = exporter.export(metrics_data)
+
+ self.assertEqual(result, MetricExportResult.SUCCESS)
+ self.assertEqual(mock_post.call_count, 1)
+ mock_post.assert_called_once()
+
+ call_args = mock_post.call_args
+ self.assertEqual(call_args.kwargs["url"], exporter._endpoint)
+ self.assertIsInstance(call_args.kwargs["data"], bytes)
+ self.assertEqual(
+ call_args.kwargs["verify"], exporter._certificate_file
+ )
+ batch_data = call_args.kwargs["data"]
+ request = ExportMetricsServiceRequest()
+ request.ParseFromString(batch_data)
+ self.assertEqual(len(request.resource_metrics), 1)
+ metrics = request.resource_metrics[0].scope_metrics[0].metrics
+ self.assertEqual(len(metrics), 2)
+ metric_names = {metric.name for metric in metrics}
+ self.assertEqual(metric_names, {"sum_int_0", "sum_int_1"})
+
+ @patch.object(Session, "post")
+ def test_export_max_export_batch_size_multiple_batches_integration(
+ self, mock_post
+ ):
+ resp = Response()
+ resp.status_code = 200
+ mock_post.return_value = resp
+
+ # 3 data points, batch size of 2: requires 2 batches
+ metrics_data = (
+ TestOTLPMetricExporter._create_metrics_data_multiple_data_points(3)
+ )
+ exporter = OTLPMetricExporter(max_export_batch_size=2)
+ result = exporter.export(metrics_data)
+
+ self.assertEqual(result, MetricExportResult.SUCCESS)
+ self.assertEqual(mock_post.call_count, 2)
+
+ for call_args in mock_post.call_args_list:
+ self.assertEqual(call_args.kwargs["url"], exporter._endpoint)
+ self.assertIsInstance(call_args.kwargs["data"], bytes)
+ self.assertEqual(
+ call_args.kwargs["verify"], exporter._certificate_file
+ )
+ self.assertEqual(len(mock_post.call_args_list), 2)
+
+ # First batch should contain sum_int_0 and sum_int_1
+ first_batch_data = mock_post.call_args_list[0].kwargs["data"]
+ first_request = ExportMetricsServiceRequest()
+ first_request.ParseFromString(first_batch_data)
+ self.assertEqual(len(first_request.resource_metrics), 1)
+ first_metrics = (
+ first_request.resource_metrics[0].scope_metrics[0].metrics
+ )
+ self.assertEqual(len(first_metrics), 2)
+ first_metric_names = {metric.name for metric in first_metrics}
+ self.assertEqual(first_metric_names, {"sum_int_0", "sum_int_1"})
+
+ # Second batch should contain sum_int_2
+ second_batch_data = mock_post.call_args_list[1].kwargs["data"]
+ second_request = ExportMetricsServiceRequest()
+ second_request.ParseFromString(second_batch_data)
+ self.assertEqual(len(second_request.resource_metrics), 1)
+ second_metrics = (
+ second_request.resource_metrics[0].scope_metrics[0].metrics
+ )
+ self.assertEqual(len(second_metrics), 1)
+ self.assertEqual(second_metrics[0].name, "sum_int_2")
+
+ @patch.object(Session, "post")
+ def test_export_max_export_batch_size_retry_scenarios_integration(
+ self, mock_post
+ ):
+        # Setup HTTP responses: first request succeeds, second fails non-retryable
+ success_resp = Response()
+ success_resp.status_code = 200
+ failure_resp = Response()
+ failure_resp.status_code = 400
+ failure_resp.reason = "Bad Request"
+ mock_post.side_effect = [success_resp, failure_resp]
+
+ # 3 data points, batch size of 2: requires 2 batches
+ metrics_data = (
+ TestOTLPMetricExporter._create_metrics_data_multiple_data_points(3)
+ )
+ exporter = OTLPMetricExporter(max_export_batch_size=2)
+
+ # Export should fail when second batch fails
+ result = exporter.export(metrics_data)
+ self.assertEqual(result, MetricExportResult.FAILURE)
+ self.assertEqual(mock_post.call_count, 2)
+
+ # Verify the content of successful first batch
+ first_batch_data = mock_post.call_args_list[0].kwargs["data"]
+ first_request = ExportMetricsServiceRequest()
+ first_request.ParseFromString(first_batch_data)
+ self.assertEqual(len(first_request.resource_metrics), 1)
+ first_metrics = (
+ first_request.resource_metrics[0].scope_metrics[0].metrics
+ )
+ self.assertEqual(len(first_metrics), 2)
+ first_metric_names = {metric.name for metric in first_metrics}
+ self.assertEqual(first_metric_names, {"sum_int_0", "sum_int_1"})
+
+ @patch.object(Session, "post")
+ def test_export_max_export_batch_size_retryable_failure_integration(
+ self, mock_post
+ ):
+ success_resp = Response()
+ success_resp.status_code = 200
+ retryable_failure_resp = Response()
+ retryable_failure_resp.status_code = 503
+ retryable_failure_resp.reason = "Service Unavailable"
+ mock_post.side_effect = [
+ success_resp,
+ retryable_failure_resp,
+ success_resp,
+ ]
+
+ # 3 data points, batch size of 2: requires 2 batches
+ metrics_data = (
+ TestOTLPMetricExporter._create_metrics_data_multiple_data_points(3)
+ )
+ exporter = OTLPMetricExporter(max_export_batch_size=2, timeout=2.0)
+
+ # Export should eventually succeed after retry
+ result = exporter.export(metrics_data)
+ self.assertEqual(result, MetricExportResult.SUCCESS)
+ self.assertEqual(
+ mock_post.call_count, 3
+ ) # First batch + retry of second batch
+
+ first_batch_data = mock_post.call_args_list[0].kwargs["data"]
+ first_request = ExportMetricsServiceRequest()
+ first_request.ParseFromString(first_batch_data)
+ self.assertEqual(len(first_request.resource_metrics), 1)
+ first_metrics = (
+ first_request.resource_metrics[0].scope_metrics[0].metrics
+ )
+ self.assertEqual(len(first_metrics), 2)
+ first_metric_names = {metric.name for metric in first_metrics}
+ self.assertEqual(first_metric_names, {"sum_int_0", "sum_int_1"})
+ # Second batch (retry) should contain sum_int_2
+ second_batch_data = mock_post.call_args_list[2].kwargs["data"]
+ second_request = ExportMetricsServiceRequest()
+ second_request.ParseFromString(second_batch_data)
+ self.assertEqual(len(second_request.resource_metrics), 1)
+ second_metrics = (
+ second_request.resource_metrics[0].scope_metrics[0].metrics
+ )
+ self.assertEqual(len(second_metrics), 1)
+ self.assertEqual(second_metrics[0].name, "sum_int_2")
+
def test_aggregation_temporality(self):
otlp_metric_exporter = OTLPMetricExporter()
@@ -635,3 +1315,44 @@
)
assert after - before < 0.2
+
+
+def _resource_metrics(
+ index: int, scope_metrics: List[pb2.ScopeMetrics]
+) -> pb2.ResourceMetrics:
+ return pb2.ResourceMetrics(
+ resource={
+ "attributes": [KeyValue(key="a", value={"int_value": index})],
+ },
+ schema_url=f"resource_url_{index}",
+ scope_metrics=scope_metrics,
+ )
+
+
+def _scope_metrics(index: int, metrics: List[pb2.Metric]) -> pb2.ScopeMetrics:
+ return pb2.ScopeMetrics(
+ scope=InstrumentationScope(name=f"scope_{index}"),
+ schema_url=f"scope_url_{index}",
+ metrics=metrics,
+ )
+
+
+def _gauge(index: int, data_points: List[pb2.NumberDataPoint]) -> pb2.Metric:
+ return pb2.Metric(
+ name=f"gauge_{index}",
+ description="description",
+ unit="unit",
+ gauge=pb2.Gauge(data_points=data_points),
+ )
+
+
+def _number_data_point(value: int) -> pb2.NumberDataPoint:
+ return pb2.NumberDataPoint(
+ attributes=[
+ KeyValue(key="a", value={"int_value": 1}),
+ KeyValue(key="b", value={"bool_value": True}),
+ ],
+ start_time_unix_nano=1641946015139533244,
+ time_unix_nano=1641946016139533244,
+ as_int=value,
+ )