Merge pull request 'feat: Add iobench project and python dashboard' (#112) from feat/iobench into master
Reviewed-on: https://git.nationtech.io/NationTech/harmony/pulls/112
Commit 67b5c2df07
iobench/Cargo.toml (new file, 17 lines)
@@ -0,0 +1,17 @@
[package]
name = "iobench"
edition = "2024"
version = "1.0.0"
license = "AGPL-3.0-or-later"
description = "A small command-line utility to run fio benchmarks on localhost or on a remote SSH or Kubernetes host. Born out of a need to benchmark various Ceph configurations!"

[dependencies]
clap = { version = "4.0", features = ["derive"] }
chrono = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
csv = "1.1"
num_cpus = "1.13"

# Empty workspace table: builds this crate standalone rather than as a member of the parent workspace.
[workspace]
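Nothing beyond a standard Cargo build is needed to try the tool; a quick sketch (fio itself must be installed on whichever host the benchmark targets):

```bash
cd iobench
cargo build --release
./target/release/iobench --help   # prints the flags defined in src/main.rs
```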
iobench/dash/README.md (new file, 10 lines)
@@ -0,0 +1,10 @@
This project was generated mostly by Gemini, but it works, so... :)

## To run the iobench dashboard

```bash
virtualenv venv
source venv/bin/activate
pip install -r requirements_freeze.txt
python iobench-dash.py
```
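One gotcha: the dashboard reads `iobench.csv`, which needs a `label` column naming each cluster configuration, while the `summary.csv` that iobench writes has no such column. The labels have to be added when combining runs; a minimal sketch, with hypothetical run directories and labels taken from the sample data in the script:

```bash
# Hypothetical run directories; adjust names and labels to your own runs.
(
  echo "label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms"
  tail -n +2 iobench-hdd/summary.csv | sed 's/^/Ceph HDD Only,/'
  tail -n +2 iobench-ssd/summary.csv | sed 's/^/Ceph WAL+DB SSD OSD HDD,/'
) > iobench.csv
```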
iobench/dash/iobench-dash.py (new file, 229 lines)
@@ -0,0 +1,229 @@
import dash
from dash import dcc, html, Input, Output, State, clientside_callback, ClientsideFunction
import plotly.express as px
import pandas as pd
import dash_bootstrap_components as dbc
import io  # only needed when loading the inline sample data below

# --- Data Loading and Preparation ---
# csv_data = """label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
# Ceph HDD Only,read-4k-sync-test,1474.302,5897,0.673,0.591
# Ceph HDD Only,write-4k-sync-test,14.126,56,27.074,7.046
# Ceph HDD Only,randread-4k-sync-test,225.140,900,4.436,6.918
# Ceph HDD Only,randwrite-4k-sync-test,13.129,52,34.891,10.859
# Ceph HDD Only,multiread-4k-sync-test,6873.675,27494,0.578,0.764
# Ceph HDD Only,multiwrite-4k-sync-test,57.135,228,38.660,11.293
# Ceph HDD Only,multirandread-4k-sync-test,2451.376,9805,1.626,2.515
# Ceph HDD Only,multirandwrite-4k-sync-test,54.642,218,33.492,13.111
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,read-4k-sync-test,1495.700,5982,0.664,1.701
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,write-4k-sync-test,16.990,67,17.502,9.908
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randread-4k-sync-test,159.256,637,6.274,9.232
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randwrite-4k-sync-test,16.693,66,24.094,16.099
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiread-4k-sync-test,7305.559,29222,0.544,1.338
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiwrite-4k-sync-test,52.260,209,34.891,17.576
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandread-4k-sync-test,700.606,2802,5.700,10.429
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandwrite-4k-sync-test,52.723,210,29.709,25.829
# Ceph 2 Hosts WAL+DB SSD Only,randwrite-4k-sync-test,90.037,360,3.617,8.321
# Ceph WAL+DB SSD During Rebuild,randwrite-4k-sync-test,41.008,164,10.138,19.333
# Ceph WAL+DB SSD OSD HDD,read-4k-sync-test,1520.299,6081,0.654,1.539
# Ceph WAL+DB SSD OSD HDD,write-4k-sync-test,78.528,314,4.074,9.101
# Ceph WAL+DB SSD OSD HDD,randread-4k-sync-test,153.303,613,6.518,9.036
# Ceph WAL+DB SSD OSD HDD,randwrite-4k-sync-test,48.677,194,8.785,20.356
# Ceph WAL+DB SSD OSD HDD,multiread-4k-sync-test,6804.880,27219,0.584,1.422
# Ceph WAL+DB SSD OSD HDD,multiwrite-4k-sync-test,311.513,1246,4.978,9.458
# Ceph WAL+DB SSD OSD HDD,multirandread-4k-sync-test,581.756,2327,6.869,10.204
# Ceph WAL+DB SSD OSD HDD,multirandwrite-4k-sync-test,120.556,482,13.463,25.440
# """
#
# df = pd.read_csv(io.StringIO(csv_data))
df = pd.read_csv("iobench.csv")  # Replace with the actual file path
df['bandwidth_mbps'] = df['bandwidth_kibps'] / 1024

# --- App Initialization and Global Settings ---
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])

# Create master lists of options for checklists
unique_labels = sorted(df['label'].unique())
unique_tests = sorted(df['test_name'].unique())

# Create a consistent color map for each unique label
color_map = {label: color for label, color in zip(unique_labels, px.colors.qualitative.Plotly)}

# --- App Layout ---
app.layout = dbc.Container([
    # Header
    dbc.Row(dbc.Col(html.H1("Ceph iobench Performance Dashboard", className="text-primary"),), className="my-4 text-center"),

    # Controls and Graphs Row
    dbc.Row([
        # Control Panel Column
        dbc.Col([
            dbc.Card([
                dbc.CardBody([
                    html.H4("Control Panel", className="card-title"),
                    html.Hr(),

                    # Metric Selection
                    dbc.Label("1. Select Metrics to Display:", html_for="metric-checklist", className="fw-bold"),
                    dcc.Checklist(
                        id='metric-checklist',
                        options=[
                            {'label': 'IOPS', 'value': 'iops'},
                            {'label': 'Latency (ms)', 'value': 'latency_mean_ms'},
                            {'label': 'Bandwidth (MB/s)', 'value': 'bandwidth_mbps'}
                        ],
                        value=['iops', 'latency_mean_ms', 'bandwidth_mbps'],  # Default selection
                        labelClassName="d-block"
                    ),
                    html.Hr(),

                    # Configuration Selection
                    dbc.Label("2. Select Configurations:", html_for="config-checklist", className="fw-bold"),
                    dbc.ButtonGroup([
                        dbc.Button("All", id="config-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
                        dbc.Button("None", id="config-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
                    ], className="mb-2"),
                    dcc.Checklist(
                        id='config-checklist',
                        options=[{'label': label, 'value': label} for label in unique_labels],
                        value=unique_labels,  # Select all by default
                        labelClassName="d-block"
                    ),
                    html.Hr(),

                    # Test Name Selection
                    dbc.Label("3. Select Tests:", html_for="test-checklist", className="fw-bold"),
                    dbc.ButtonGroup([
                        dbc.Button("All", id="test-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
                        dbc.Button("None", id="test-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
                    ], className="mb-2"),
                    dcc.Checklist(
                        id='test-checklist',
                        options=[{'label': test, 'value': test} for test in unique_tests],
                        value=unique_tests,  # Select all by default
                        labelClassName="d-block"
                    ),
                ])
            ], className="mb-4")
        ], width=12, lg=4),

        # Graph Display Column
        dbc.Col(id='graph-container', width=12, lg=8)
    ])
], fluid=True)


# --- Callbacks ---

# Callback to handle "Select All" / "Select None" for configurations
@app.callback(
    Output('config-checklist', 'value'),
    Input('config-select-all', 'n_clicks'),
    Input('config-select-none', 'n_clicks'),
    prevent_initial_call=True
)
def select_all_none_configs(all_clicks, none_clicks):
    ctx = dash.callback_context
    if not ctx.triggered:
        return dash.no_update

    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if button_id == 'config-select-all':
        return unique_labels
    elif button_id == 'config-select-none':
        return []
    return dash.no_update

# Callback to handle "Select All" / "Select None" for tests
@app.callback(
    Output('test-checklist', 'value'),
    Input('test-select-all', 'n_clicks'),
    Input('test-select-none', 'n_clicks'),
    prevent_initial_call=True
)
def select_all_none_tests(all_clicks, none_clicks):
    ctx = dash.callback_context
    if not ctx.triggered:
        return dash.no_update

    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if button_id == 'test-select-all':
        return unique_tests
    elif button_id == 'test-select-none':
        return []
    return dash.no_update


# Main callback to update graphs based on all selections
@app.callback(
    Output('graph-container', 'children'),
    [Input('metric-checklist', 'value'),
     Input('config-checklist', 'value'),
     Input('test-checklist', 'value')]
)
def update_graphs(selected_metrics, selected_configs, selected_tests):
    """
    This function is triggered when any control's value changes.
    It generates and returns a list of graphs based on all user selections.
    """
    # Handle cases where no selection is made to prevent errors and show a helpful message
    if not all([selected_metrics, selected_configs, selected_tests]):
        return dbc.Alert(
            "Please select at least one item from each category (Metric, Configuration, and Test) to view data.",
            color="info",
            className="mt-4"
        )

    # Filter the DataFrame based on all selected criteria
    filtered_df = df[df['label'].isin(selected_configs) & df['test_name'].isin(selected_tests)]

    # If the filtered data is empty after selection, inform the user
    if filtered_df.empty:
        return dbc.Alert("No data available for the current selection.", color="warning", className="mt-4")

    graph_list = []
    metric_titles = {
        'iops': 'IOPS Comparison (Higher is Better)',
        'latency_mean_ms': 'Mean Latency (ms) Comparison (Lower is Better)',
        'bandwidth_mbps': 'Bandwidth (MB/s) Comparison (Higher is Better)'
    }

    for metric in selected_metrics:
        sort_order = 'total ascending' if metric == 'latency_mean_ms' else 'total descending'
        error_y_param = 'latency_stddev_ms' if metric == 'latency_mean_ms' else None

        fig = px.bar(
            filtered_df,
            x='test_name',
            y=metric,
            color='label',
            barmode='group',
            color_discrete_map=color_map,
            error_y=error_y_param,
            title=metric_titles.get(metric, metric),
            labels={
                "test_name": "Benchmark Test Name",
                "iops": "IOPS",
                "latency_mean_ms": "Mean Latency (ms)",
                "bandwidth_mbps": "Bandwidth (MB/s)",
                "label": "Cluster Configuration"
            }
        )

        fig.update_layout(
            height=500,
            xaxis_title=None,
            legend_title="Configuration",
            title_x=0.5,
            xaxis={'categoryorder': sort_order},
            xaxis_tickangle=-45,
            margin=dict(b=120)  # Add bottom margin to prevent tick labels from being cut off
        )

        graph_list.append(dbc.Row(dbc.Col(dcc.Graph(figure=fig)), className="mb-4"))

    return graph_list

# --- Run the App ---
if __name__ == '__main__':
    app.run(debug=True)  # Serves on http://127.0.0.1:8050 by default; debug enables hot reload
iobench/dash/requirements_freeze.txt (new file, 29 lines)
@@ -0,0 +1,29 @@
blinker==1.9.0
certifi==2025.7.14
charset-normalizer==3.4.2
click==8.2.1
dash==3.2.0
dash-bootstrap-components==2.0.3
Flask==3.1.1
idna==3.10
importlib_metadata==8.7.0
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
narwhals==2.0.1
nest-asyncio==1.6.0
numpy==2.3.2
packaging==25.0
pandas==2.3.1
plotly==6.2.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.4
retrying==1.4.1
setuptools==80.9.0
six==1.17.0
typing_extensions==4.14.1
tzdata==2025.2
urllib3==2.5.0
Werkzeug==3.1.3
zipp==3.23.0
iobench/deployment.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: iobench
  labels:
    app: iobench
spec:
  replicas: 1
  selector:
    matchLabels:
      app: iobench
  template:
    metadata:
      labels:
        app: iobench
    spec:
      containers:
        - name: fio
          image: juicedata/fio:latest # Replace with your preferred fio image
          imagePullPolicy: IfNotPresent
          command: [ "sleep", "infinity" ] # Keeps the container running for kubectl exec
          volumeMounts:
            - name: iobench-pvc
              mountPath: /data # Mount the PVC at /data
      volumes:
        - name: iobench-pvc
          persistentVolumeClaim:
            claimName: iobench-pvc # Matches your PVC name
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: iobench-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: ceph-block
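Once the manifest is applied, iobench can drive fio inside the pod through its k8s target. A sketch, assuming the deployment lands in the default namespace and the `iobench` binary is on PATH (the pod name suffix is generated, so look it up first):

```bash
kubectl apply -f iobench/deployment.yaml
POD=$(kubectl get pods -l app=iobench -o jsonpath='{.items[0].metadata.name}')
# Point iobench at the PVC-backed mount; target format is k8s/{namespace}/{pod}
iobench --target "k8s/default/$POD" --benchmark-dir /data
```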
iobench/src/main.rs (new file, 253 lines)
@@ -0,0 +1,253 @@
use std::fs;
use std::io::{self, Write};
use std::process::{Command, Stdio};
use std::thread;
use std::time::Duration;

use chrono::Local;
use clap::Parser;
use serde::{Deserialize, Serialize};

/// A simple yet powerful I/O benchmarking tool using fio.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Target for the benchmark.
    /// Formats:
    /// - localhost (default)
    /// - ssh/{user}@{host}
    /// - ssh/{user}@{host}:{port}
    /// - k8s/{namespace}/{pod}
    #[arg(short, long, default_value = "localhost")]
    target: String,

    /// Directory on the target where the fio test file is created.
    #[arg(short, long, default_value = ".")]
    benchmark_dir: String,

    /// Comma-separated list of tests to run.
    /// Available tests: read, write, randread, randwrite,
    /// multiread, multiwrite, multirandread, multirandwrite.
    #[arg(long, default_value = "read,write,randread,randwrite,multiread,multiwrite,multirandread,multirandwrite")]
    tests: String,

    /// Duration of each test in seconds.
    #[arg(long, default_value_t = 15)]
    duration: u64,

    /// Output directory for results.
    /// Defaults to ./iobench-{current_datetime}.
    #[arg(long)]
    output_dir: Option<String>,

    /// The size of the test file for fio.
    #[arg(long, default_value = "1G")]
    size: String,

    /// The block size for I/O operations.
    #[arg(long, default_value = "4k")]
    block_size: String,
}

#[derive(Debug, Serialize, Deserialize)]
struct FioOutput {
    jobs: Vec<FioJobResult>,
}

#[derive(Debug, Serialize, Deserialize)]
struct FioJobResult {
    jobname: String,
    read: FioMetrics,
    write: FioMetrics,
}

#[derive(Debug, Serialize, Deserialize)]
struct FioMetrics {
    bw: f64,
    iops: f64,
    clat_ns: LatencyMetrics,
}

#[derive(Debug, Serialize, Deserialize)]
struct LatencyMetrics {
    mean: f64,
    stddev: f64,
}

#[derive(Debug, Serialize)]
struct BenchmarkResult {
    test_name: String,
    iops: f64,
    bandwidth_kibps: f64,
    latency_mean_ms: f64,
    latency_stddev_ms: f64,
}

fn main() -> io::Result<()> {
    let args = Args::parse();

    let output_dir = args.output_dir.unwrap_or_else(|| {
        format!("./iobench-{}", Local::now().format("%Y-%m-%d-%H%M%S"))
    });
    fs::create_dir_all(&output_dir)?;

    let tests_to_run: Vec<&str> = args.tests.split(',').collect();
    let mut results = Vec::new();

    for test in tests_to_run {
        println!("--------------------------------------------------");
        println!("Running test: {}", test);

        let (rw, numjobs) = match test {
            "read" => ("read", 1),
            "write" => ("write", 1),
            "randread" => ("randread", 1),
            "randwrite" => ("randwrite", 1),
            "multiread" => ("read", 4),
            "multiwrite" => ("write", 4),
            "multirandread" => ("randread", 4),
            "multirandwrite" => ("randwrite", 4),
            _ => {
                eprintln!("Unknown test: {}. Skipping.", test);
                continue;
            }
        };

        let test_name = format!("{}-{}-sync-test", test, args.block_size);
        let fio_command = format!(
            "fio --filename={}/iobench_testfile --direct=1 --fsync=1 --rw={} --bs={} --numjobs={} --iodepth=1 --runtime={} --time_based --group_reporting --name={} --size={} --output-format=json",
            args.benchmark_dir, rw, args.block_size, numjobs, args.duration, test_name, args.size
        );

        println!("Executing command:\n{}\n", fio_command);

        let output = match run_command(&args.target, &fio_command) {
            Ok(out) => out,
            Err(e) => {
                eprintln!("Failed to execute command for test {}: {}", test, e);
                continue;
            }
        };

        let result = parse_fio_output(&output, &test_name, rw);
        // TODO store raw fio output and print it
        match result {
            Ok(res) => {
                results.push(res);
            }
            Err(e) => {
                eprintln!("Error parsing fio output for test {}: {}", test, e);
                eprintln!("Raw output:\n{}", output);
            }
        }

        println!("{output}");
        println!("Test {} completed.", test);
        // A brief pause to let the system settle before the next test.
        thread::sleep(Duration::from_secs(2));
    }

    // Clean up the test file on the target, using the same path fio wrote to.
    println!("--------------------------------------------------");
    println!("Cleaning up test file on target...");
    let cleanup_command = format!("rm -f {}/iobench_testfile", args.benchmark_dir);
    if let Err(e) = run_command(&args.target, &cleanup_command) {
        eprintln!("Warning: Failed to clean up test file on target: {}", e);
    } else {
        println!("Cleanup successful.");
    }

    if results.is_empty() {
        println!("\nNo benchmark results to display.");
        return Ok(());
    }

    // Output results to a CSV file for easy analysis
    let csv_path = format!("{}/summary.csv", output_dir);
    let mut wtr = csv::Writer::from_path(&csv_path)?;
    for result in &results {
        wtr.serialize(result)?;
    }
    wtr.flush()?;

    println!("\nBenchmark summary saved to {}", csv_path);
    println!("\n--- Benchmark Results Summary ---");
    println!("{:<25} {:>10} {:>18} {:>20} {:>22}", "Test Name", "IOPS", "Bandwidth (KiB/s)", "Latency Mean (ms)", "Latency StdDev (ms)");
    println!("{:-<98}", "");
    for result in results {
        println!("{:<25} {:>10.2} {:>18.2} {:>20.4} {:>22.4}", result.test_name, result.iops, result.bandwidth_kibps, result.latency_mean_ms, result.latency_stddev_ms);
    }

    Ok(())
}

fn run_command(target: &str, command: &str) -> io::Result<String> {
    let (program, args) = if target == "localhost" {
        ("sudo", vec!["sh".to_string(), "-c".to_string(), command.to_string()])
    } else if target.starts_with("ssh/") {
        let target_str = target.strip_prefix("ssh/").unwrap();
        let ssh_target;
        let mut ssh_args = vec!["-o".to_string(), "StrictHostKeyChecking=no".to_string()];
        let port_parts: Vec<&str> = target_str.split(':').collect();
        if port_parts.len() == 2 {
            ssh_target = port_parts[0].to_string();
            ssh_args.push("-p".to_string());
            ssh_args.push(port_parts[1].to_string());
        } else {
            ssh_target = target_str.to_string();
        }
        ssh_args.push(ssh_target);
        ssh_args.push(format!("sudo sh -c '{}'", command));
        ("ssh", ssh_args)
    } else if target.starts_with("k8s/") {
        let parts: Vec<&str> = target.strip_prefix("k8s/").unwrap().split('/').collect();
        if parts.len() != 2 {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid k8s target format. Expected k8s/{namespace}/{pod}"));
        }
        let namespace = parts[0];
        let pod = parts[1];
        ("kubectl", vec!["exec".to_string(), "-n".to_string(), namespace.to_string(), pod.to_string(), "--".to_string(), "sh".to_string(), "-c".to_string(), command.to_string()])
    } else {
        return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid target format"));
    };

    let mut cmd = Command::new(program);
    cmd.args(&args);
    cmd.stdout(Stdio::piped()).stderr(Stdio::piped());

    let child = cmd.spawn()?;
    let output = child.wait_with_output()?;

    if !output.status.success() {
        eprintln!("Command failed with status: {}", output.status);
        io::stderr().write_all(&output.stderr)?;
        return Err(io::Error::new(io::ErrorKind::Other, "Command execution failed"));
    }

    String::from_utf8(output.stdout)
        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}

fn parse_fio_output(output: &str, test_name: &str, rw: &str) -> Result<BenchmarkResult, String> {
    let fio_data: FioOutput = serde_json::from_str(output)
        .map_err(|e| format!("Failed to deserialize fio JSON: {}", e))?;

    let job_result = fio_data.jobs.iter()
        .find(|j| j.jobname == test_name)
        .ok_or_else(|| format!("Could not find job result for '{}' in fio output", test_name))?;

    // fio reports read and write stats for every job; pick the side this test exercised.
    let metrics = if rw.contains("read") {
        &job_result.read
    } else {
        &job_result.write
    };

    Ok(BenchmarkResult {
        test_name: test_name.to_string(),
        iops: metrics.iops,
        bandwidth_kibps: metrics.bw,
        latency_mean_ms: metrics.clat_ns.mean / 1_000_000.0,
        latency_stddev_ms: metrics.clat_ns.stddev / 1_000_000.0,
    })
}
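Putting the target formats from the Args doc comments together, invocations look like the sketches below (host, port, and pod name are placeholders, and `iobench` is assumed to be the built binary on PATH):

```bash
# Local run; fio must be installed, and commands are wrapped in sudo
iobench --tests randread,randwrite --duration 30

# Remote host over SSH on a non-default port, writing into a mounted Ceph volume
iobench --target ssh/root@storage01:2222 --benchmark-dir /mnt/ceph

# fio pod from deployment.yaml; format is k8s/{namespace}/{pod}
iobench --target k8s/default/<pod-name> --benchmark-dir /data
```

Each run writes its `summary.csv` into `./iobench-{current_datetime}` unless `--output-dir` is given.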