to_hex on it.
// main.rs:
#[derive(Default)]
pub struct MyEmployeeService {}

#[tonic::async_trait]
impl EmployeeService for MyEmployeeService {
    async fn get_all_employees(
        &self,
        request: Request<()>,
    ) -> Result<Response<GetAllEmployeesResponse>, Status> {
        // Extract the parent context from the incoming gRPC metadata.
        let parent_ctx =
            global::get_text_map_propagator(|prop| prop.extract(&MetadataMap(request.metadata())));
        let span = global::tracer("employee-service")
            .start_with_context("get_all_employees", parent_ctx.clone()); //<---- doesn't look right
        span.set_attribute(KeyValue::new("request", format!("{:?}", request)));
        let connection = database::create_connection(parent_ctx);
        let employees: Vec<Employee> = database::get_employees(&connection)
            .into_iter()
            .map(model_mapper)
            .collect();
        let result = GetAllEmployeesResponse { employees };
        Ok(Response::new(result))
    }
}
// database.rs:
pub fn create_connection(ctx: Context) -> PgConnection {
    let tracer = global::tracer("database-tracer");
    // Start a child span under the context handed over from the gRPC handler.
    let _span = tracer
        .span_builder("create_connection")
        .with_parent_context(ctx)
        .start(&tracer);

    dotenv().ok();
    let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    PgConnection::establish(&database_url)
        .unwrap_or_else(|_| panic!("Error connecting to {}", database_url))
}
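For reference, MetadataMap above is not a tonic type; it's the usual Extractor newtype over tonic's request metadata, along the lines of the tonic example in opentelemetry-rust. A minimal sketch of what the snippet assumes:

// Newtype so the propagator can read trace headers out of tonic's metadata.
struct MetadataMap<'a>(&'a tonic::metadata::MetadataMap);

impl<'a> opentelemetry::propagation::Extractor for MetadataMap<'a> {
    // Look up a single header value, ignoring values that are not valid ASCII.
    fn get(&self, key: &str) -> Option<&str> {
        self.0.get(key).and_then(|value| value.to_str().ok())
    }

    // List all header keys present in the metadata.
    fn keys(&self) -> Vec<&str> {
        self.0
            .keys()
            .map(|key| match key {
                tonic::metadata::KeyRef::Ascii(v) => v.as_str(),
                tonic::metadata::KeyRef::Binary(v) => v.as_str(),
            })
            .collect()
    }
}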
hey; cool stuff about 0.13.0, let's hope it is a lucky number :) Anyway, a small question about metrics again. If I get it right, for ValueRecorders
the aggregator is set once when the metrics pipeline is created, for example:
controller = Some(
    opentelemetry_otlp::new_metrics_pipeline(tokio::spawn, delayed_interval)
        .with_export_config(export_config)
        .with_period(std::time::Duration::from_secs(
            open_telemetry.metric_window.unwrap_or_else(|| 30),
        ))
        .with_aggregator_selector(selectors::simple::Selector::Histogram(vec![
            0.0, 0.1, 0.2, 0.3, 0.5, 0.8, 1.3, 2.1,
        ]))
        .build()?,
);
My understanding is that this will apply to all ValueRecorders,
so all histograms will have the same buckets.
Isn't that a bit of a limitation?
From my application's perspective, I would like to be able to set different buckets for different metrics.
I'm trying to use OpenTelemetry in an existing actix-web application that already uses tracing
to export to Jaeger. I try to configure everything like this:
use opentelemetry::{global, runtime::TokioCurrentThread};
use tracing::{subscriber::set_global_default, Subscriber};
use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
use tracing_log::LogTracer;
use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry};

pub fn get_subscriber(name: &str, env_filter: &str) -> impl Subscriber + Send + Sync {
    let env_filter =
        EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(env_filter));
    let formatting_layer = BunyanFormattingLayer::new(name.to_string(), std::io::stdout);

    global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
    let tracer = opentelemetry_jaeger::new_pipeline()
        .with_service_name(name)
        .install_batch(TokioCurrentThread)
        .expect("cannot install jaeger pipeline");
    let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);

    Registry::default()
        .with(telemetry)
        .with(env_filter)
        .with(JsonStorageLayer)
        .with(formatting_layer)
}

pub fn init_subscriber(subscriber: impl Subscriber + Send + Sync) {
    LogTracer::init().expect("Failed to set logger");
    set_global_default(subscriber).expect("Failed to set tracing subscriber");
}
and will later call init_subscriber(get_subscriber("app_name", "info")),
but this fails because opentelemetry::sdk::trace::Tracer does not implement the following traits:
opentelemetry::trace::tracer::Tracer, PreSampledTracer.
I don't know what I'm missing here...
there is no reactor running, must be called from the context of a Tokio 1.x runtime
because opentelemetry 0.12 pulls in tokio 1.0 while actix-web stable is still on tokio 0.*

opentelemetry_otlp::new_metrics_pipeline(tokio::spawn, delayed_interval)
    .with_export_config(export_config)
    .with_period(std::time::Duration::from_secs(
        open_telemetry.metric_window.unwrap_or_else(|| 30),
    ))
    .with_aggregator_selector(selectors::simple::Selector::Histogram(vec![
        0.0, 0.1, 0.2, 0.3, 0.5, 0.8, 1.3, 2.1,
    ]))
    .build()?,
There is a PR open so you can customize the aggregators (and the buckets they might use) based on the metric descriptor name: open-telemetry/opentelemetry-rust#497
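Until something like that lands, one possible workaround is to hand-roll a selector and pass it to .with_aggregator_selector(...) instead of selectors::simple::Selector. A rough sketch, assuming the AggregatorSelector trait and the aggregators helpers exposed by the SDK (the metric names are made up for illustration, and exact module paths may differ between releases):

use std::sync::Arc;
use opentelemetry::metrics::{Descriptor, InstrumentKind};
use opentelemetry::sdk::export::metrics::{Aggregator, AggregatorSelector};
use opentelemetry::sdk::metrics::aggregators;

// Picks histogram boundaries per metric name, falling back to a default set.
#[derive(Debug)]
struct PerMetricSelector;

impl AggregatorSelector for PerMetricSelector {
    fn aggregator_for(&self, descriptor: &Descriptor) -> Option<Arc<dyn Aggregator + Send + Sync>> {
        // Hypothetical metric names, just to show per-metric buckets.
        let boundaries: &[f64] = match descriptor.name() {
            "http.request.duration" => &[0.0, 0.1, 0.2, 0.3, 0.5, 0.8, 1.3, 2.1],
            "db.query.duration" => &[0.0, 0.01, 0.05, 0.1, 0.5, 1.0],
            _ => &[0.0, 0.5, 1.0, 5.0],
        };
        match descriptor.instrument_kind() {
            // Recorders/observers get a histogram with the chosen buckets...
            InstrumentKind::ValueRecorder | InstrumentKind::ValueObserver => {
                Some(Arc::new(aggregators::histogram(descriptor, boundaries)))
            }
            // ...everything else falls back to a plain sum.
            _ => Some(Arc::new(aggregators::sum())),
        }
    }
}

You would then pass PerMetricSelector to .with_aggregator_selector(PerMetricSelector) in place of selectors::simple::Selector::Histogram(...).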
./target/debug/deps/sha2-0dbd11ac79fa6266.gcno:version '408*', prefer 'A93*'
find: ‘gcov’ terminated by signal 11
hey all, I am running this example here:
fn init_meter() -> metrics::Result<PushController> {
    let export_config = ExporterConfig {
        endpoint: "http://localhost:4317".to_string(),
        protocol: Protocol::Grpc,
        ..ExporterConfig::default()
    };
    opentelemetry_otlp::new_metrics_pipeline(tokio::spawn, delayed_interval)
        .with_export_config(export_config)
        .with_aggregator_selector(selectors::simple::Selector::Exact)
        .build()
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
    let _started = init_meter()?;
    let meter = global::meter("test2");
    let value_counter = meter.u64_counter("blah9").init();
    let labels = vec![KeyValue::new("key1", "val1")];
    for i in 0..100 {
        let j = i % 4;
        println!("{} {}", i, j);
        let mut labels = vec![]; // labels.clone();
        let kv = match j {
            0 => KeyValue::new("method", "GET"),
            1 => KeyValue::new("method", "POST"),
            2 => KeyValue::new("method", "PUT"),
            3 => KeyValue::new("method", "DELETE"),
            _ => KeyValue::new("method", "HEAD"),
        };
        labels.push(kv);
        // labels.push(KeyValue::new("key4", j.to_string()));
        value_counter.add(1, &labels);
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
    // Wait for 1 minute so that we can see metrics being pushed via OTLP every 10 seconds.
    tokio::time::sleep(Duration::from_secs(60)).await;
    shutdown_tracer_provider();
    Ok(())
}
At the end, in the Prometheus dashboard I am getting
agent_blah9{collector="pf", instance="otel-agent:8889", job="otel-collector", method="PUT", type="docker"} 25
where I would expect
agent_blah9{collector="pf", instance="otel-agent:8889", job="otel-collector", method="PUT", type="docker"} 25
agent_blah9{collector="pf", instance="otel-agent:8889", job="otel-collector", method="POST", type="docker"} 25
agent_blah9{collector="pf", instance="otel-agent:8889", job="otel-collector", method="GET", type="docker"} 25
agent_blah9{collector="pf", instance="otel-agent:8889", job="otel-collector", method="DELETE", type="docker"} 25
Any ideas? I had a look at the opentelemetry-agent logs and there seems to be only one type of metric there:
Data point labels:
-> method: PUT