Hello all,

I have an application where I want to use a label with a very large number of
values, around 100k+. I don't have too many metrics otherwise, so we can assume
the total number of time series produced by these labels stays under 2 million.
Apart from queries on the top-level metric (without a label filter) being
expensive, are there any other things to be aware of that can cause poor
behavior in Prometheus?
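
For illustration, an unfiltered instant query has to touch every one of those
series, while a query with a selector on the high-cardinality label is a cheap
index lookup (using my test metric testAttr from the output below; the sid
value is just a made-up example):

curl -G -s 'localhost:9090/api/v1/query' --data-urlencode 'query=testAttr'
curl -G -s 'localhost:9090/api/v1/query' --data-urlencode 'query=testAttr{sid="user-12345"}'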

Second question: I'm running a test in the scenario described above, and I see
RSS going up constantly. This is the output from the TSDB status endpoint:

curl -s localhost:9090/api/v1/status/tsdb | python3 -m json.tool

{
    "status": "success",
    "data": {
        "seriesCountByMetricName": [
            {
                "name": "testAttr",
                "value": 500000
            },
            {
                "name": "prometheus_http_request_duration_seconds_bucket",
                "value": 80
            },
            {
                "name": "prometheus_http_response_size_bytes_bucket",
                "value": 72
            },
            {
                "name": "prometheus_sd_kubernetes_events_total",
                "value": 15
            },
            {
                "name": "prometheus_tsdb_compaction_chunk_samples_bucket",
                "value": 13
            },
            {
                "name": "prometheus_tsdb_compaction_chunk_size_bytes_bucket",
                "value": 13
            },
            {
                "name": "net_conntrack_dialer_conn_failed_total",
                "value": 12
            },
            {
                "name": "prometheus_tsdb_tombstone_cleanup_seconds_bucket",
                "value": 12
            },
            {
                "name": "prometheus_engine_query_duration_seconds",
                "value": 12
            },
            {
                "name": "prometheus_tsdb_compaction_duration_seconds_bucket",
                "value": 11
            }
        ],
        "labelValueCountByLabelName": [
            {
                "name": "sid",
                "value": 500000
            },
            {
                "name": "__name__",
                "value": 187
            },
            {
                "name": "le",
                "value": 66
            },
            {
                "name": "quantile",
                "value": 9
            },
            {
                "name": "handler",
                "value": 8
            },
            {
                "name": "role",
                "value": 5
            },
            {
                "name": "reason",
                "value": 4
            },
            {
                "name": "code",
                "value": 4
            },
            {
                "name": "slice",
                "value": 4
            },
            {
                "name": "event",
                "value": 3
            }
        ],
        "memoryInBytesByLabelName": [
            {
                "name": "sid",
                "value": 4888890
            },
            {
                "name": "__name__",
                "value": 7032
            },
            {
                "name": "le",
                "value": 307
            },
            {
                "name": "handler",
                "value": 109
            },
            {
                "name": "slice",
                "value": 43
            },
            {
                "name": "revision",
                "value": 40
            },
            {
                "name": "instance",
                "value": 32
            },
            {
                "name": "reason",
                "value": 31
            },
            {
                "name": "role",
                "value": 30
            },
            {
                "name": "quantile",
                "value": 28
            }
        ],
        "seriesCountByLabelValuePair": [
            {
                "name": "job=myapp",
                "value": 500043
            },
            {
                "name": "instance=192.168.0.124:2112",
                "value": 500043
            },
            {
                "name": "__name__=testAttr",
                "value": 500000
            },
            {
                "name": "instance=localhost:9090",
                "value": 517
            },
            {
                "name": "job=prometheus",
                "value": 517
            },
            {
                "name": "__name__=prometheus_http_request_duration_seconds_bucket",
                "value": 80
            },
            {
                "name": "__name__=prometheus_http_response_size_bytes_bucket",
                "value": 72
            },
            {
                "name": "handler=/api/v1/status/tsdb",
                "value": 24
            },
            {
                "name": "handler=/metrics",
                "value": 24
            },
            {
                "name": "handler=/api/v1/label/:name/values",
                "value": 24
            }
        ]
    }
}
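
For reference, I'm correlating the RSS growth with Prometheus's own
self-metrics for resident memory and in-memory head series, which can be
queried like this (these are standard Prometheus metric names; the job label
matches my setup above):

curl -G -s 'localhost:9090/api/v1/query' --data-urlencode 'query=process_resident_memory_bytes{job="prometheus"}'
curl -G -s 'localhost:9090/api/v1/query' --data-urlencode 'query=prometheus_tsdb_head_series'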

Thanks
Karthik
