use chrono::{DateTime, Utc};
use dora_node_api::{
    self,
    arrow::{
        array::{
            make_array, Array, ListArray, StringArray, TimestampMillisecondArray, UInt64Array,
        },
        buffer::{OffsetBuffer, ScalarBuffer},
        datatypes::{DataType, Field, Schema},
        record_batch::RecordBatch,
    },
    DoraNode, Event, Metadata,
};
use dora_tracing::telemetry::deserialize_to_hashmap;
use eyre::{Context, ContextCompat};
use parquet::{arrow::AsyncArrowWriter, basic::BrotliLevel, file::properties::WriterProperties};
use std::{collections::HashMap, path::PathBuf, sync::Arc};
use tokio::sync::mpsc;
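
// Record every input this node receives into one parquet file per input ID,
// written under `out/<dataflow_id>/<input_id>.parquet`.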
#[tokio::main]
async fn main() -> eyre::Result<()> {
    let (node, mut events) = DoraNode::init_from_env()?;
    let dataflow_id = node.dataflow_id();
    let mut writers = HashMap::new();
    while let Some(event) = events.recv() {
        match event {
            Event::Input { id, data, metadata } => {
                match writers.get(&id) {
                    None => {
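                        // First event for this input: derive the schema from
                        // the payload's Arrow data type, create the output
                        // file, and spawn a dedicated writer task.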
                        let field_uhlc = Field::new("timestamp_uhlc", DataType::UInt64, false);
                        let field_utc_epoch = Field::new(
                            "timestamp_utc",
                            DataType::Timestamp(
                                dora_node_api::arrow::datatypes::TimeUnit::Millisecond,
                                None,
                            ),
                            false,
                        );
                        let field_trace_id = Field::new("trace_id", DataType::Utf8, true);
                        let field_span_id = Field::new("span_id", DataType::Utf8, true);
                        let field_values =
                            Arc::new(Field::new("item", data.data_type().clone(), true));
                        let field_data = Field::new(id.clone(), DataType::List(field_values), true);
                        let schema = Arc::new(Schema::new(vec![
                            field_trace_id,
                            field_span_id,
                            field_uhlc,
                            field_utc_epoch,
                            field_data,
                        ]));
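                        // Each dataflow run gets its own output directory, so
                        // recordings from different runs don't overwrite each
                        // other.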
                        let dataflow_dir = PathBuf::from("out").join(dataflow_id.to_string());
                        if !dataflow_dir.exists() {
                            std::fs::create_dir_all(&dataflow_dir)
                                .context("could not create dataflow_dir")?;
                        }
                        let file =
                            tokio::fs::File::create(dataflow_dir.join(format!("{id}.parquet")))
                                .await
                                .context("Couldn't create write file")?;
                        let mut writer = AsyncArrowWriter::try_new(
                            file,
                            schema.clone(),
                            Some(
                                WriterProperties::builder()
                                    .set_compression(parquet::basic::Compression::BROTLI(
                                        BrotliLevel::default(),
                                    ))
                                    .build(),
                            ),
                        )
                        .context("Could not create parquet writer")?;
                        let (tx, mut rx) = mpsc::channel(10);
                        // One writer task per input: events are forwarded over
                        // the channel and written sequentially; when the
                        // channel closes, the writer flushes and closes the
                        // file.
                        let join_handle = tokio::spawn(async move {
                            while let Some((data, metadata)) = rx.recv().await {
                                if let Err(e) =
                                    write_event(&mut writer, data, &metadata, schema.clone()).await
                                {
                                    println!("Error writing event data into parquet file: {:?}", e)
                                };
                            }
                            writer.close().await
                        });
                        tx.send((data.into(), metadata))
                            .await
                            .context("Could not send event data into writer loop")?;
                        writers.insert(id, (tx, join_handle));
                    }
                    Some((tx, _)) => {
                        tx.send((data.into(), metadata))
                            .await
                            .context("Could not send event data into writer loop")?;
                    }
                };
            }
            Event::InputClosed { id } => match writers.remove(&id) {
                None => {}
                // Dropping the sender closes the channel; the writer task
                // then drains any pending events and closes the file.
                Some((tx, _join_handle)) => drop(tx),
            },
            _ => {}
        }
    }
    // On shutdown, close every remaining channel and wait for each writer
    // task to flush and close its file.
    for (id, (tx, join_handle)) in writers {
        drop(tx);
        join_handle
            .await
            .context("Writer task failed")?
            .context(format!("Could not close the parquet writer for input {id}"))?;
    }
    Ok(())
}

/// Write a single event as a one-row record batch into the parquet writer.
async fn write_event(
    writer: &mut AsyncArrowWriter<tokio::fs::File>,
    data: Arc<dyn Array>,
    metadata: &Metadata,
    schema: Arc<Schema>,
) -> eyre::Result<()> {
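    // Wrap the payload in a single-element ListArray so that the whole event
    // occupies exactly one row of the data column.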
    let offsets = OffsetBuffer::new(ScalarBuffer::from(vec![0, data.len() as i32]));
    let field = Arc::new(Field::new("item", data.data_type().clone(), true));
    let list = ListArray::new(field, offsets, data.clone(), None);
    let timestamp = metadata.timestamp();
    let timestamp_uhlc = UInt64Array::from(vec![timestamp.get_time().0]);
    let timestamp_uhlc = make_array(timestamp_uhlc.into());
    let system_time = timestamp.get_time().to_system_time();
    let dt: DateTime<Utc> = system_time.into();
    let timestamp_utc = TimestampMillisecondArray::from(vec![dt.timestamp_millis()]);
    let timestamp_utc = make_array(timestamp_utc.into());
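    // Extract the trace and span IDs from the W3C `traceparent` header, whose
    // format is `<version>-<trace_id>-<parent_id>-<flags>`.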
    let string_otel_context = metadata.parameters.open_telemetry_context.to_string();
    let otel_context = deserialize_to_hashmap(&string_otel_context);
    let traceparent = otel_context.get("traceparent");
    let trace_id = match traceparent {
        None => "",
        Some(trace) => trace.split('-').nth(1).context("Trace is malformed")?,
    };
    let span_id = match traceparent {
        None => "",
        Some(trace) => trace.split('-').nth(2).context("Trace is malformed")?,
    };
    let trace_id_array = StringArray::from(vec![trace_id]);
    let trace_id_array = make_array(trace_id_array.into());
    let span_id_array = StringArray::from(vec![span_id]);
    let span_id_array = make_array(span_id_array.into());
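    // Column order must match the schema: trace_id, span_id, timestamp_uhlc,
    // timestamp_utc, data.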
    let record = RecordBatch::try_new(
        schema,
        vec![
            trace_id_array,
            span_id_array,
            timestamp_uhlc,
            timestamp_utc,
            make_array(list.into()),
        ],
    )
    .context("Could not create record batch with the given data")?;
    writer
        .write(&record)
        .await
        .context("Could not write record batch to file")?;
    Ok(())
}