openvm_stark_sdk/bench/
mod.rs

1use std::{collections::BTreeMap, ffi::OsStr};
2
3#[cfg(feature = "prometheus")]
4use metrics_exporter_prometheus::PrometheusBuilder;
5use metrics_tracing_context::{MetricsLayer, TracingContextLayer};
6use metrics_util::{
7    debugging::{DebugValue, DebuggingRecorder, Snapshot},
8    layers::Layer,
9    CompositeKey, MetricKind,
10};
11use serde_json::json;
12use tracing_forest::ForestLayer;
13use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry};
14
15#[cfg(feature = "metrics")]
16use crate::metrics_tracing::TimingMetricsLayer;
17
18/// Run a function with metric collection enabled. The metrics will be written to a file specified
19/// by an environment variable which name is `output_path_envar`.
20pub fn run_with_metric_collection<R>(
21    output_path_envar: impl AsRef<OsStr>,
22    f: impl FnOnce() -> R,
23) -> R {
24    let file = std::env::var(output_path_envar).map(|path| std::fs::File::create(path).unwrap());
25    // Set up tracing:
26    let env_filter =
27        EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info,p3_=warn"));
28    // Plonky3 logging is more verbose, so we set default to debug.
29    let subscriber = Registry::default()
30        .with(env_filter)
31        .with(ForestLayer::default())
32        .with(MetricsLayer::new());
33    #[cfg(feature = "metrics")]
34    let subscriber = subscriber.with(TimingMetricsLayer::new());
35    // Prepare tracing.
36    tracing::subscriber::set_global_default(subscriber).unwrap();
37
38    // Prepare metrics.
39    let recorder = DebuggingRecorder::new();
40    let snapshotter = recorder.snapshotter();
41    let recorder = TracingContextLayer::all().layer(recorder);
42    // Install the registry as the global recorder
43    metrics::set_global_recorder(recorder).unwrap();
44    let res = f();
45
46    if let Ok(file) = file {
47        serde_json::to_writer_pretty(&file, &serialize_metric_snapshot(snapshotter.snapshot()))
48            .unwrap();
49    }
50    res
51}
52
/// Run a function with a Prometheus metric exporter enabled. Metrics are pushed to a
/// local push gateway on the port named by the environment variable `metrics_port_envar`,
/// defaulting to 9091 when the variable is unset or not a valid `u16`.
#[cfg(feature = "prometheus")]
pub fn run_with_metric_exporter<R>(
    metrics_port_envar: impl AsRef<OsStr>,
    f: impl FnOnce() -> R,
) -> R {
    // Read the port from the environment, falling back to 9091 when the variable is
    // missing or unparsable. (Previously a missing variable panicked via `.unwrap()`,
    // contradicting the documented "or use a default" behavior; only a parse failure
    // fell back to 9091.)
    let metrics_port: u16 = std::env::var(metrics_port_envar)
        .ok()
        .and_then(|port| port.parse().ok())
        .unwrap_or(9091);
    let endpoint = format!("http://127.0.0.1:{}/metrics/job/stark-sdk", metrics_port);

    // Clear any metrics left over from a previous run so this run's data is isolated.
    let status = std::process::Command::new("curl")
        .args(["-X", "DELETE", &endpoint])
        .status()
        .expect("Failed to clear metrics");
    if status.success() {
        println!("Metrics cleared successfully");
    }

    // Install the default crypto provider for rustls (used by the exporter's HTTP stack).
    rustls::crypto::aws_lc_rs::default_provider()
        .install_default()
        .expect("Failed to install default crypto provider");

    // Set up the Prometheus recorder, pushing to the gateway once a minute.
    let builder = PrometheusBuilder::new()
        .with_push_gateway(endpoint, std::time::Duration::from_secs(60), None, None)
        .expect("Push gateway endpoint should be valid");

    // The exporter is an async task: run it on the ambient tokio runtime if one exists,
    // otherwise spin up a dedicated current-thread runtime on a background thread.
    let recorder = if let Ok(handle) = tokio::runtime::Handle::try_current() {
        let (recorder, exporter) = {
            // `build()` must run inside a runtime context.
            let _g = handle.enter();
            builder.build().unwrap()
        };
        handle.spawn(exporter);
        recorder
    } else {
        let thread_name = "metrics-exporter-prometheus-push-gateway";
        let runtime = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();
        let (recorder, exporter) = {
            let _g = runtime.enter();
            builder.build().unwrap()
        };
        std::thread::Builder::new()
            .name(thread_name.to_string())
            .spawn(move || runtime.block_on(exporter))
            .unwrap();
        recorder
    };

    // Set up tracing: honor RUST_LOG-style config when present, otherwise default to
    // `info` globally while quieting the more verbose Plonky3 (`p3_*`) crates.
    let env_filter =
        EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info,p3_=warn"));
    let subscriber = Registry::default()
        .with(env_filter)
        .with(ForestLayer::default())
        .with(MetricsLayer::new());
    #[cfg(feature = "metrics")]
    let subscriber = subscriber.with(TimingMetricsLayer::new());
    tracing::subscriber::set_global_default(subscriber).unwrap();

    // Wrap the recorder so tracing span fields become metric labels, then install it.
    let recorder = TracingContextLayer::all().layer(recorder);
    metrics::set_global_recorder(recorder).unwrap();

    // Run the actual function.
    let res = f();
    // Sleep longer than the 60-second push interval so the final batch of metrics is
    // flushed to the gateway before we return.
    std::thread::sleep(std::time::Duration::from_secs(80));
    println!(
        "Metrics available at http://127.0.0.1:{}/metrics/job/stark-sdk",
        metrics_port
    );
    res
}
136
137/// Serialize a gauge/counter metric into a JSON object. The object has the following structure:
138/// {
139///    "metric": <Metric Name>,
140///    "labels": [
141///       (<key1>, <value1>),
142///       (<key2>, <value2>),
143///     ],
144///    "value": <float value if gauge | integer value if counter>
145/// }
146fn serialize_metric(ckey: CompositeKey, value: DebugValue) -> serde_json::Value {
147    let (_kind, key) = ckey.into_parts();
148    let (key_name, labels) = key.into_parts();
149    let value = match value {
150        DebugValue::Gauge(v) => v.into_inner().to_string(),
151        DebugValue::Counter(v) => v.to_string(),
152        DebugValue::Histogram(_) => todo!("Histograms not supported yet."),
153    };
154    let labels = labels
155        .into_iter()
156        .map(|label| {
157            let (k, v) = label.into_parts();
158            (k.as_ref().to_owned(), v.as_ref().to_owned())
159        })
160        .collect::<Vec<_>>();
161
162    json!({
163        "metric": key_name.as_str(),
164        "labels": labels,
165        "value": value,
166    })
167}
168
169/// Serialize a metric snapshot into a JSON object. The object has the following structure:
170/// {
171///   "gauge": [
172///     {
173///         "metric": <Metric Name>,
174///         "labels": [
175///             (<key1>, <value1>),
176///             (<key2>, <value2>),
177///         ],
178///         "value": <float value>
179///     },
180///     ...
181///   ],
182///   ...
183/// }
184pub fn serialize_metric_snapshot(snapshot: Snapshot) -> serde_json::Value {
185    let mut ret = BTreeMap::<_, Vec<serde_json::Value>>::new();
186    for (ckey, _, _, value) in snapshot.into_vec() {
187        match ckey.kind() {
188            MetricKind::Gauge => {
189                ret.entry("gauge")
190                    .or_default()
191                    .push(serialize_metric(ckey, value));
192            }
193            MetricKind::Counter => {
194                ret.entry("counter")
195                    .or_default()
196                    .push(serialize_metric(ckey, value));
197            }
198            MetricKind::Histogram => todo!(),
199        }
200    }
201    json!(ret)
202}