feat: Add iobench project and python dashboard
All checks were successful
Run Check Script / check (pull_request) Successful in 1m3s
parent bd214f8fb8
commit fd8f643a8f
15 iobench/Cargo.toml Normal file
@@ -0,0 +1,15 @@
[package]
name = "iobench"
edition = "2024"
version = "1.0.0"


[dependencies]
clap = { version = "4.0", features = ["derive"] }
chrono = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
csv = "1.1"
num_cpus = "1.13"

[workspace]
10 iobench/dash/README.md Normal file
@@ -0,0 +1,10 @@
This project was generated mostly by Gemini, but it works, so... :)

## To run the iobench dashboard

```bash
virtualenv venv
source venv/bin/activate
pip install -r requirements_freeze.txt
python iobench-dash-v4.py
```
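Note: the dashboards read an `iobench.csv` whose first column is `label` (the cluster configuration under test), while `iobench` itself writes a `summary.csv` without that column. A minimal sketch for assembling the file from one or more runs — the run directory and label here are placeholders:

```bash
# Write the header the dashboards expect.
echo "label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms" > iobench.csv
# Skip each run's CSV header and prepend a label describing that run's cluster setup.
tail -n +2 iobench-2025-01-01-120000/summary.csv | sed 's/^/Ceph HDD Only,/' >> iobench.csv
```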
109 iobench/dash/iobench-dash-v1.py Normal file
@@ -0,0 +1,109 @@
from dash import Dash, dcc, html, Input, Output
import plotly.graph_objects as go
import pandas as pd

# Load the CSV data
df = pd.read_csv("iobench.csv")  # Replace with the actual file path

# Initialize Dash app
app = Dash(__name__)

# Layout
app.layout = html.Div(
    [
        html.H1("IOBench Results Viewer", style={"textAlign": "center"}),

        # Filters
        html.Div(
            [
                html.Label("Filter by Label:"),
                dcc.Dropdown(
                    id="label-filter",
                    options=[{"label": label, "value": label} for label in df["label"].unique()],
                    value=df["label"].unique().tolist(),
                    multi=True,
                ),
                html.Label("Filter by Test Name:"),
                dcc.Dropdown(
                    id="test-filter",
                    options=[{"label": test, "value": test} for test in df["test_name"].unique()],
                    value=df["test_name"].unique().tolist(),
                    multi=True,
                ),
            ],
            style={"width": "25%", "display": "inline-block", "verticalAlign": "top", "padding": "10px"},
        ),

        # Graphs
        html.Div(
            [
                dcc.Graph(id="throughput-graph"),
                dcc.Graph(id="latency-graph"),
            ],
            style={"width": "70%", "display": "inline-block", "padding": "10px"},
        ),
    ]
)

# Callbacks
@app.callback(
    [Output("throughput-graph", "figure"), Output("latency-graph", "figure")],
    [Input("label-filter", "value"), Input("test-filter", "value")],
)
def update_graphs(selected_labels, selected_tests):
    # Filter data
    filtered_df = df[df["label"].isin(selected_labels) & df["test_name"].isin(selected_tests)]

    # Throughput Graph
    throughput_fig = go.Figure()
    for label in filtered_df["label"].unique():
        subset = filtered_df[filtered_df["label"] == label]
        throughput_fig.add_trace(
            go.Bar(
                x=subset["test_name"],
                y=subset["iops"],
                name=f"{label} - IOPS",
            )
        )
        throughput_fig.add_trace(
            go.Bar(
                x=subset["test_name"],
                y=subset["bandwidth_kibps"],
                name=f"{label} - Bandwidth (KiB/s)",
            )
        )
    throughput_fig.update_layout(
        title="Throughput (IOPS and Bandwidth)",
        xaxis_title="Test Name",
        yaxis_title="Value",
        barmode="group",
    )

    # Latency Graph
    latency_fig = go.Figure()
    for label in filtered_df["label"].unique():
        subset = filtered_df[filtered_df["label"] == label]
        latency_fig.add_trace(
            go.Scatter(
                x=subset["test_name"],
                y=subset["latency_mean_ms"],
                mode="markers+lines",
                name=f"{label} - Latency Mean (ms)",
                error_y=dict(
                    type="data",
                    array=subset["latency_stddev_ms"],
                    visible=True,
                ),
            )
        )
    latency_fig.update_layout(
        title="Latency with Standard Deviation",
        xaxis_title="Test Name",
        yaxis_title="Latency (ms)",
    )

    return throughput_fig, latency_fig

if __name__ == "__main__":
    # app.run_server() was removed in Dash 3.x (this repo pins dash==3.2.0); use app.run().
    app.run(debug=True)
150 iobench/dash/iobench-dash-v2.py Normal file
@@ -0,0 +1,150 @@
import dash
from dash import dcc, html, Input, Output
import plotly.express as px
import plotly.graph_objects as go  # needed for the empty-selection fallback figure below
import pandas as pd
import dash_bootstrap_components as dbc
import io

# --- Sample Data ---
# In a real-world scenario, you would load this from a CSV file.
# For this self-contained example, we define the data directly.
# Example: df = pd.read_csv('benchmark_data.csv')
csv_data = """
config,op_type,iops,latency_ms,throughput_mbs
All-HDD,4k_random_read,260,60,1.02
All-HDD,4k_random_write,100,150,0.39
All-HDD,64k_sequential_read,2100,30,131.25
All-HDD,64k_sequential_write,1500,42,93.75
HDD+SSD_WAL,4k_random_read,270,58,1.05
HDD+SSD_WAL,4k_random_write,160,100,0.62
HDD+SSD_WAL,64k_sequential_read,2150,29,134.37
HDD+SSD_WAL,64k_sequential_write,1800,35,112.5
HDD+SSD_WAL_DB,4k_random_read,1250,12,4.88
HDD+SSD_WAL_DB,4k_random_write,1550,10,6.05
HDD+SSD_WAL_DB,64k_sequential_read,2200,28,137.5
HDD+SSD_WAL_DB,64k_sequential_write,2000,32,125
All-NVMe,4k_random_read,400000,0.1,1562.5
All-NVMe,4k_random_write,350000,0.12,1367.18
All-NVMe,64k_sequential_read,16000,4,1000
All-NVMe,64k_sequential_write,12500,5,800
"""

# Read the data using pandas
df = pd.read_csv(io.StringIO(csv_data))

# Initialize the Dash app with a Bootstrap theme
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])

# --- App Layout ---
app.layout = dbc.Container([
    # Header
    dbc.Row([
        dbc.Col([
            html.H1("Ceph Cluster Benchmark Visualizer", className="text-primary"),
            html.P(
                "An interactive tool to compare performance metrics across different Ceph storage configurations.",
                className="lead"
            )
        ])
    ], className="my-4"),

    # Controls and Graphs Row
    dbc.Row([
        # Control Panel Column
        dbc.Col([
            dbc.Card([
                dbc.CardBody([
                    html.H4("Control Panel", className="card-title"),
                    html.Hr(),

                    # Metric Selection Radio Buttons
                    dbc.Label("Select Metric to Display:", html_for="metric-selector"),
                    dcc.RadioItems(
                        id='metric-selector',
                        options=[
                            {'label': 'IOPS (Input/Output Operations Per Second)', 'value': 'iops'},
                            {'label': 'Latency (in Milliseconds)', 'value': 'latency_ms'},
                            {'label': 'Throughput (in MB/s)', 'value': 'throughput_mbs'}
                        ],
                        value='iops',  # Default value
                        labelClassName="d-block"  # Display labels as blocks
                    ),
                    html.Hr(),

                    # Configuration Selection Checklist
                    dbc.Label("Select Configurations to Compare:", html_for="config-checklist"),
                    dcc.Checklist(
                        id='config-checklist',
                        options=[{'label': config, 'value': config} for config in df['config'].unique()],
                        value=df['config'].unique(),  # Select all by default
                        labelClassName="d-block"
                    ),
                ])
            ], className="mb-4")
        ], width=12, lg=4),  # Full width on small screens, 1/3 on large

        # Graph Display Column
        dbc.Col([
            dcc.Graph(id='benchmark-graph')
        ], width=12, lg=8)  # Full width on small screens, 2/3 on large
    ])
], fluid=True)  # Use a fluid container for full-width layout


# --- Callback Function ---
# This function connects the controls to the graph
@app.callback(
    Output('benchmark-graph', 'figure'),
    [Input('metric-selector', 'value'),
     Input('config-checklist', 'value')]
)
def update_graph(selected_metric, selected_configs):
    """
    This function is triggered whenever a control's value changes.
    It filters the dataframe and returns an updated bar chart figure.
    """
    if not selected_configs:
        # If no configs are selected, return an empty figure to avoid errors
        return go.Figure().update_layout(
            title="Please select a configuration to view data.",
            xaxis_title="",
            yaxis_title=""
        )

    # Filter the DataFrame based on the selected configurations
    filtered_df = df[df['config'].isin(selected_configs)]

    # Create the bar chart using Plotly Express
    fig = px.bar(
        filtered_df,
        x='op_type',
        y=selected_metric,
        color='config',
        barmode='group',  # Group bars for different configs side-by-side
        labels={
            "op_type": "Benchmark Operation Type",
            "iops": "IOPS (Higher is Better)",
            "latency_ms": "Latency in ms (Lower is Better)",
            "throughput_mbs": "Throughput in MB/s (Higher is Better)",
            "config": "Storage Configuration"
        },
        title=f"Benchmark Comparison for: {selected_metric.replace('_', ' ').title()}",
        height=600  # Set a fixed height for the graph
    )

    # Update layout for better readability
    fig.update_layout(
        xaxis_title="Operation Type",
        yaxis_title=selected_metric.replace('_', ' ').title(),
        legend_title="Configuration",
        title_x=0.5,  # Center the title
        xaxis={'categoryorder': 'total descending' if selected_metric != 'latency_ms' else 'total ascending'}
    )

    return fig

# --- Run the App ---
if __name__ == '__main__':
    # Use debug=True for development, allowing hot-reloading
    app.run(debug=True)
175 iobench/dash/iobench-dash-v3.py Normal file
@@ -0,0 +1,175 @@
import dash
from dash import dcc, html, Input, Output
import plotly.express as px
import pandas as pd
import dash_bootstrap_components as dbc
import io
import plotly.graph_objects as go

# --- Data Loading and Preparation ---
# 1. Use the exact iobench csv output format provided.
csv_data = """label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
Ceph HDD Only,read-4k-sync-test,1474.302,5897,0.673,0.591
Ceph HDD Only,write-4k-sync-test,14.126,56,27.074,7.046
Ceph HDD Only,randread-4k-sync-test,225.140,900,4.436,6.918
Ceph HDD Only,randwrite-4k-sync-test,13.129,52,34.891,10.859
Ceph HDD Only,multiread-4k-sync-test,6873.675,27494,0.578,0.764
Ceph HDD Only,multiwrite-4k-sync-test,57.135,228,38.660,11.293
Ceph HDD Only,multirandread-4k-sync-test,2451.376,9805,1.626,2.515
Ceph HDD Only,multirandwrite-4k-sync-test,54.642,218,33.492,13.111
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,read-4k-sync-test,1495.700,5982,0.664,1.701
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,write-4k-sync-test,16.990,67,17.502,9.908
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randread-4k-sync-test,159.256,637,6.274,9.232
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randwrite-4k-sync-test,16.693,66,24.094,16.099
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiread-4k-sync-test,7305.559,29222,0.544,1.338
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiwrite-4k-sync-test,52.260,209,34.891,17.576
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandread-4k-sync-test,700.606,2802,5.700,10.429
Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandwrite-4k-sync-test,52.723,210,29.709,25.829
Ceph 2 Hosts WAL+DB SSD Only,randwrite-4k-sync-test,90.037,360,3.617,8.321
Ceph WAL+DB SSD During Rebuild,randwrite-4k-sync-test,41.008,164,10.138,19.333
Ceph WAL+DB SSD OSD HDD,read-4k-sync-test,1520.299,6081,0.654,1.539
Ceph WAL+DB SSD OSD HDD,write-4k-sync-test,78.528,314,4.074,9.101
Ceph WAL+DB SSD OSD HDD,randread-4k-sync-test,153.303,613,6.518,9.036
Ceph WAL+DB SSD OSD HDD,randwrite-4k-sync-test,48.677,194,8.785,20.356
Ceph WAL+DB SSD OSD HDD,multiread-4k-sync-test,6804.880,27219,0.584,1.422
Ceph WAL+DB SSD OSD HDD,multiwrite-4k-sync-test,311.513,1246,4.978,9.458
Ceph WAL+DB SSD OSD HDD,multirandread-4k-sync-test,581.756,2327,6.869,10.204
Ceph WAL+DB SSD OSD HDD,multirandwrite-4k-sync-test,120.556,482,13.463,25.440
"""

# Read the data and create a more user-friendly bandwidth column in MB/s
df = pd.read_csv(io.StringIO(csv_data))
df['bandwidth_mbps'] = df['bandwidth_kibps'] / 1024

# --- App Initialization and Global Settings ---
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])

# 3. Create a consistent color map for each unique label (cluster topology).
unique_labels = df['label'].unique()
color_map = {label: color for label, color in zip(unique_labels, px.colors.qualitative.Plotly)}

# --- App Layout ---
app.layout = dbc.Container([
    # Header
    dbc.Row([
        dbc.Col([
            html.H1("Ceph iobench Performance Dashboard", className="text-primary"),
            html.P(
                "Compare benchmark results across different Ceph cluster configurations and metrics.",
                className="lead"
            )
        ])
    ], className="my-4"),

    # Controls and Graphs Row
    dbc.Row([
        # Control Panel Column
        dbc.Col([
            dbc.Card([
                dbc.CardBody([
                    html.H4("Control Panel", className="card-title"),
                    html.Hr(),

                    # 2. Metric Selection Checklist to view multiple graphs
                    dbc.Label("Select Metrics to Display:", html_for="metric-checklist", className="fw-bold"),
                    dcc.Checklist(
                        id='metric-checklist',
                        options=[
                            {'label': 'IOPS', 'value': 'iops'},
                            {'label': 'Latency (ms)', 'value': 'latency_mean_ms'},
                            {'label': 'Bandwidth (MB/s)', 'value': 'bandwidth_mbps'}
                        ],
                        value=['iops', 'latency_mean_ms'],  # Default selection
                        labelClassName="d-block"
                    ),
                    html.Hr(),

                    # Configuration Selection Checklist
                    dbc.Label("Select Configurations to Compare:", html_for="config-checklist", className="fw-bold"),
                    dcc.Checklist(
                        id='config-checklist',
                        options=[{'label': label, 'value': label} for label in unique_labels],
                        value=unique_labels,  # Select all by default
                        labelClassName="d-block"
                    ),
                ])
            ], className="mb-4")
        ], width=12, lg=4),

        # Graph Display Column - This will be populated by the callback
        dbc.Col(id='graph-container', width=12, lg=8)
    ])
], fluid=True)


# --- Callback Function ---
@app.callback(
    Output('graph-container', 'children'),
    [Input('metric-checklist', 'value'),
     Input('config-checklist', 'value')]
)
def update_graphs(selected_metrics, selected_configs):
    """
    This function is triggered when a control's value changes.
    It generates and returns a list of graphs based on user selections.
    """
    # Handle cases where no selection is made to prevent errors
    if not selected_metrics or not selected_configs:
        return dbc.Alert("Please select at least one metric and one configuration to view data.", color="info")

    # Filter the DataFrame based on the selected configurations
    filtered_df = df[df['label'].isin(selected_configs)]

    # Create a list to hold all the graph components
    graph_list = []

    # Define user-friendly titles for graphs
    metric_titles = {
        'iops': 'IOPS Comparison (Higher is Better)',
        'latency_mean_ms': 'Mean Latency (ms) Comparison (Lower is Better)',
        'bandwidth_mbps': 'Bandwidth (MB/s) Comparison (Higher is Better)'
    }

    # Loop through each selected metric and create a graph for it
    for metric in selected_metrics:
        # Determine if sorting should be ascending (for latency) or descending
        sort_order = 'total ascending' if metric == 'latency_mean_ms' else 'total descending'

        # Special handling for latency to include error bars for standard deviation
        error_y_param = 'latency_stddev_ms' if metric == 'latency_mean_ms' else None

        fig = px.bar(
            filtered_df,
            x='test_name',
            y=metric,
            color='label',
            barmode='group',
            color_discrete_map=color_map,  # 3. Apply the consistent color map
            error_y=error_y_param,  # Adds error bars for latency stddev
            title=metric_titles.get(metric, metric),
            labels={
                "test_name": "Benchmark Test Name",
                "iops": "IOPS",
                "latency_mean_ms": "Mean Latency (ms)",
                "bandwidth_mbps": "Bandwidth (MB/s)",
                "label": "Cluster Configuration"
            }
        )

        fig.update_layout(
            height=500,
            xaxis_title=None,  # Clean up x-axis title
            legend_title="Configuration",
            title_x=0.5,  # Center the title
            xaxis={'categoryorder': sort_order},
            xaxis_tickangle=-45  # Angle labels to prevent overlap
        )

        # Add the generated graph to our list, wrapped in a column for layout
        graph_list.append(dbc.Row(dbc.Col(dcc.Graph(figure=fig)), className="mb-4"))

    return graph_list

# --- Run the App ---
if __name__ == '__main__':
    app.run(debug=True)
229 iobench/dash/iobench-dash-v4.py Normal file
@@ -0,0 +1,229 @@
import dash
from dash import dcc, html, Input, Output, State, clientside_callback, ClientsideFunction
import plotly.express as px
import pandas as pd
import dash_bootstrap_components as dbc
import io

# --- Data Loading and Preparation ---
# csv_data = """label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
# Ceph HDD Only,read-4k-sync-test,1474.302,5897,0.673,0.591
# Ceph HDD Only,write-4k-sync-test,14.126,56,27.074,7.046
# Ceph HDD Only,randread-4k-sync-test,225.140,900,4.436,6.918
# Ceph HDD Only,randwrite-4k-sync-test,13.129,52,34.891,10.859
# Ceph HDD Only,multiread-4k-sync-test,6873.675,27494,0.578,0.764
# Ceph HDD Only,multiwrite-4k-sync-test,57.135,228,38.660,11.293
# Ceph HDD Only,multirandread-4k-sync-test,2451.376,9805,1.626,2.515
# Ceph HDD Only,multirandwrite-4k-sync-test,54.642,218,33.492,13.111
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,read-4k-sync-test,1495.700,5982,0.664,1.701
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,write-4k-sync-test,16.990,67,17.502,9.908
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randread-4k-sync-test,159.256,637,6.274,9.232
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randwrite-4k-sync-test,16.693,66,24.094,16.099
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiread-4k-sync-test,7305.559,29222,0.544,1.338
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiwrite-4k-sync-test,52.260,209,34.891,17.576
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandread-4k-sync-test,700.606,2802,5.700,10.429
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandwrite-4k-sync-test,52.723,210,29.709,25.829
# Ceph 2 Hosts WAL+DB SSD Only,randwrite-4k-sync-test,90.037,360,3.617,8.321
# Ceph WAL+DB SSD During Rebuild,randwrite-4k-sync-test,41.008,164,10.138,19.333
# Ceph WAL+DB SSD OSD HDD,read-4k-sync-test,1520.299,6081,0.654,1.539
# Ceph WAL+DB SSD OSD HDD,write-4k-sync-test,78.528,314,4.074,9.101
# Ceph WAL+DB SSD OSD HDD,randread-4k-sync-test,153.303,613,6.518,9.036
# Ceph WAL+DB SSD OSD HDD,randwrite-4k-sync-test,48.677,194,8.785,20.356
# Ceph WAL+DB SSD OSD HDD,multiread-4k-sync-test,6804.880,27219,0.584,1.422
# Ceph WAL+DB SSD OSD HDD,multiwrite-4k-sync-test,311.513,1246,4.978,9.458
# Ceph WAL+DB SSD OSD HDD,multirandread-4k-sync-test,581.756,2327,6.869,10.204
# Ceph WAL+DB SSD OSD HDD,multirandwrite-4k-sync-test,120.556,482,13.463,25.440
# """
#
# df = pd.read_csv(io.StringIO(csv_data))
df = pd.read_csv("iobench.csv")  # Replace with the actual file path
df['bandwidth_mbps'] = df['bandwidth_kibps'] / 1024

# --- App Initialization and Global Settings ---
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])

# Create master lists of options for checklists
unique_labels = sorted(df['label'].unique())
unique_tests = sorted(df['test_name'].unique())

# Create a consistent color map for each unique label
color_map = {label: color for label, color in zip(unique_labels, px.colors.qualitative.Plotly)}

# --- App Layout ---
app.layout = dbc.Container([
    # Header
    dbc.Row(dbc.Col(html.H1("Ceph iobench Performance Dashboard", className="text-primary")), className="my-4 text-center"),

    # Controls and Graphs Row
    dbc.Row([
        # Control Panel Column
        dbc.Col([
            dbc.Card([
                dbc.CardBody([
                    html.H4("Control Panel", className="card-title"),
                    html.Hr(),

                    # Metric Selection
                    dbc.Label("1. Select Metrics to Display:", html_for="metric-checklist", className="fw-bold"),
                    dcc.Checklist(
                        id='metric-checklist',
                        options=[
                            {'label': 'IOPS', 'value': 'iops'},
                            {'label': 'Latency (ms)', 'value': 'latency_mean_ms'},
                            {'label': 'Bandwidth (MB/s)', 'value': 'bandwidth_mbps'}
                        ],
                        value=['iops', 'latency_mean_ms', 'bandwidth_mbps'],  # Default selection
                        labelClassName="d-block"
                    ),
                    html.Hr(),

                    # Configuration Selection
                    dbc.Label("2. Select Configurations:", html_for="config-checklist", className="fw-bold"),
                    dbc.ButtonGroup([
                        dbc.Button("All", id="config-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
                        dbc.Button("None", id="config-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
                    ], className="mb-2"),
                    dcc.Checklist(
                        id='config-checklist',
                        options=[{'label': label, 'value': label} for label in unique_labels],
                        value=unique_labels,  # Select all by default
                        labelClassName="d-block"
                    ),
                    html.Hr(),

                    # Test Name Selection
                    dbc.Label("3. Select Tests:", html_for="test-checklist", className="fw-bold"),
                    dbc.ButtonGroup([
                        dbc.Button("All", id="test-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
                        dbc.Button("None", id="test-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
                    ], className="mb-2"),
                    dcc.Checklist(
                        id='test-checklist',
                        options=[{'label': test, 'value': test} for test in unique_tests],
                        value=unique_tests,  # Select all by default
                        labelClassName="d-block"
                    ),
                ])
            ], className="mb-4")
        ], width=12, lg=4),

        # Graph Display Column
        dbc.Col(id='graph-container', width=12, lg=8)
    ])
], fluid=True)


# --- Callbacks ---

# Callback to handle "Select All" / "Select None" for configurations
@app.callback(
    Output('config-checklist', 'value'),
    Input('config-select-all', 'n_clicks'),
    Input('config-select-none', 'n_clicks'),
    prevent_initial_call=True
)
def select_all_none_configs(all_clicks, none_clicks):
    ctx = dash.callback_context
    if not ctx.triggered:
        return dash.no_update

    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if button_id == 'config-select-all':
        return unique_labels
    elif button_id == 'config-select-none':
        return []
    return dash.no_update

# Callback to handle "Select All" / "Select None" for tests
@app.callback(
    Output('test-checklist', 'value'),
    Input('test-select-all', 'n_clicks'),
    Input('test-select-none', 'n_clicks'),
    prevent_initial_call=True
)
def select_all_none_tests(all_clicks, none_clicks):
    ctx = dash.callback_context
    if not ctx.triggered:
        return dash.no_update

    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if button_id == 'test-select-all':
        return unique_tests
    elif button_id == 'test-select-none':
        return []
    return dash.no_update


# Main callback to update graphs based on all selections
@app.callback(
    Output('graph-container', 'children'),
    [Input('metric-checklist', 'value'),
     Input('config-checklist', 'value'),
     Input('test-checklist', 'value')]
)
def update_graphs(selected_metrics, selected_configs, selected_tests):
    """
    This function is triggered when any control's value changes.
    It generates and returns a list of graphs based on all user selections.
    """
    # Handle cases where no selection is made to prevent errors and show a helpful message
    if not all([selected_metrics, selected_configs, selected_tests]):
        return dbc.Alert(
            "Please select at least one item from each category (Metric, Configuration, and Test) to view data.",
            color="info",
            className="mt-4"
        )

    # Filter the DataFrame based on all selected criteria
    filtered_df = df[df['label'].isin(selected_configs) & df['test_name'].isin(selected_tests)]

    # If the filtered data is empty after selection, inform the user
    if filtered_df.empty:
        return dbc.Alert("No data available for the current selection.", color="warning", className="mt-4")

    graph_list = []
    metric_titles = {
        'iops': 'IOPS Comparison (Higher is Better)',
        'latency_mean_ms': 'Mean Latency (ms) Comparison (Lower is Better)',
        'bandwidth_mbps': 'Bandwidth (MB/s) Comparison (Higher is Better)'
    }

    for metric in selected_metrics:
        sort_order = 'total ascending' if metric == 'latency_mean_ms' else 'total descending'
        error_y_param = 'latency_stddev_ms' if metric == 'latency_mean_ms' else None

        fig = px.bar(
            filtered_df,
            x='test_name',
            y=metric,
            color='label',
            barmode='group',
            color_discrete_map=color_map,
            error_y=error_y_param,
            title=metric_titles.get(metric, metric),
            labels={
                "test_name": "Benchmark Test Name",
                "iops": "IOPS",
                "latency_mean_ms": "Mean Latency (ms)",
                "bandwidth_mbps": "Bandwidth (MB/s)",
                "label": "Cluster Configuration"
            }
        )

        fig.update_layout(
            height=500,
            xaxis_title=None,
            legend_title="Configuration",
            title_x=0.5,
            xaxis={'categoryorder': sort_order},
            xaxis_tickangle=-45,
            margin=dict(b=120)  # Add bottom margin to prevent tick labels from being cut off
        )

        graph_list.append(dbc.Row(dbc.Col(dcc.Graph(figure=fig)), className="mb-4"))

    return graph_list

# --- Run the App ---
if __name__ == '__main__':
    app.run(debug=True)
29 iobench/dash/requirements_freeze.txt Normal file
@@ -0,0 +1,29 @@
blinker==1.9.0
certifi==2025.7.14
charset-normalizer==3.4.2
click==8.2.1
dash==3.2.0
dash-bootstrap-components==2.0.3
Flask==3.1.1
idna==3.10
importlib_metadata==8.7.0
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
narwhals==2.0.1
nest-asyncio==1.6.0
numpy==2.3.2
packaging==25.0
pandas==2.3.1
plotly==6.2.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.4
retrying==1.4.1
setuptools==80.9.0
six==1.17.0
typing_extensions==4.14.1
tzdata==2025.2
urllib3==2.5.0
Werkzeug==3.1.3
zipp==3.23.0
41 iobench/deployment.yaml Normal file
@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: iobench
  labels:
    app: iobench
spec:
  replicas: 1
  selector:
    matchLabels:
      app: iobench
  template:
    metadata:
      labels:
        app: iobench
    spec:
      containers:
        - name: fio
          image: juicedata/fio:latest # Replace with your preferred fio image
          imagePullPolicy: IfNotPresent
          command: [ "sleep", "infinity" ] # Keeps the container running for kubectl exec
          volumeMounts:
            - name: iobench-pvc
              mountPath: /data # Mount the PVC at /data
      volumes:
        - name: iobench-pvc
          persistentVolumeClaim:
            claimName: iobench-pvc # Matches your PVC name
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: iobench-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: ceph-block
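With this deployment applied, iobench can exec fio inside the pod through its `k8s/{namespace}/{pod}` target (see `run_command` in `src/main.rs`). A usage sketch — the `default` namespace and the generated pod-name suffix are placeholders:

```bash
kubectl apply -f iobench/deployment.yaml

# Look up the generated pod name.
kubectl get pods -l app=iobench

# Benchmark the PVC mounted at /data inside the pod (pod name is hypothetical).
iobench --target k8s/default/iobench-<pod-suffix> --benchmark-dir /data
```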
253 iobench/src/main.rs Normal file
@@ -0,0 +1,253 @@
use std::fs;
use std::io::{self, Write};
use std::process::{Command, Stdio};
use std::thread;
use std::time::Duration;

use chrono::Local;
use clap::Parser;
use serde::{Deserialize, Serialize};

/// A simple yet powerful I/O benchmarking tool using fio.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Target for the benchmark.
    /// Formats:
    /// - localhost (default)
    /// - ssh/{user}@{host}
    /// - ssh/{user}@{host}:{port}
    /// - k8s/{namespace}/{pod}
    #[arg(short, long, default_value = "localhost")]
    target: String,

    #[arg(short, long, default_value = ".")]
    benchmark_dir: String,

    /// Comma-separated list of tests to run.
    /// Available tests: read, write, randread, randwrite,
    /// multiread, multiwrite, multirandread, multirandwrite.
    #[arg(long, default_value = "read,write,randread,randwrite,multiread,multiwrite,multirandread,multirandwrite")]
    tests: String,

    /// Duration of each test in seconds.
    #[arg(long, default_value_t = 15)]
    duration: u64,

    /// Output directory for results.
    /// Defaults to ./iobench-{current_datetime}.
    #[arg(long)]
    output_dir: Option<String>,

    /// The size of the test file for fio.
    #[arg(long, default_value = "1G")]
    size: String,

    /// The block size for I/O operations.
    #[arg(long, default_value = "4k")]
    block_size: String,
}

#[derive(Debug, Serialize, Deserialize)]
struct FioOutput {
    jobs: Vec<FioJobResult>,
}

#[derive(Debug, Serialize, Deserialize)]
struct FioJobResult {
    jobname: String,
    read: FioMetrics,
    write: FioMetrics,
}

#[derive(Debug, Serialize, Deserialize)]
struct FioMetrics {
    bw: f64,
    iops: f64,
    clat_ns: LatencyMetrics,
}

#[derive(Debug, Serialize, Deserialize)]
struct LatencyMetrics {
    mean: f64,
    stddev: f64,
}

#[derive(Debug, Serialize)]
struct BenchmarkResult {
    test_name: String,
    iops: f64,
    bandwidth_kibps: f64,
    latency_mean_ms: f64,
    latency_stddev_ms: f64,
}

fn main() -> io::Result<()> {
    let args = Args::parse();

    let output_dir = args.output_dir.unwrap_or_else(|| {
        format!("./iobench-{}", Local::now().format("%Y-%m-%d-%H%M%S"))
    });
    fs::create_dir_all(&output_dir)?;

    let tests_to_run: Vec<&str> = args.tests.split(',').collect();
    let mut results = Vec::new();

    for test in tests_to_run {
        println!("--------------------------------------------------");
        println!("Running test: {}", test);

        let (rw, numjobs) = match test {
            "read" => ("read", 1),
            "write" => ("write", 1),
            "randread" => ("randread", 1),
            "randwrite" => ("randwrite", 1),
            "multiread" => ("read", 4),
            "multiwrite" => ("write", 4),
            "multirandread" => ("randread", 4),
            "multirandwrite" => ("randwrite", 4),
            _ => {
                eprintln!("Unknown test: {}. Skipping.", test);
                continue;
            }
        };

        let test_name = format!("{}-{}-sync-test", test, args.block_size);
        let fio_command = format!(
            "fio --filename={}/iobench_testfile --direct=1 --fsync=1 --rw={} --bs={} --numjobs={} --iodepth=1 --runtime={} --time_based --group_reporting --name={} --size={} --output-format=json",
            args.benchmark_dir, rw, args.block_size, numjobs, args.duration, test_name, args.size
        );

        println!("Executing command:\n{}\n", fio_command);

        let output = match run_command(&args.target, &fio_command) {
            Ok(out) => out,
            Err(e) => {
                eprintln!("Failed to execute command for test {}: {}", test, e);
                continue;
            }
        };

        let result = parse_fio_output(&output, &test_name, rw);
        // TODO store raw fio output and print it
        match result {
            Ok(res) => {
                results.push(res);
            }
            Err(e) => {
                eprintln!("Error parsing fio output for test {}: {}", test, e);
                eprintln!("Raw output:\n{}", output);
            }
        }

        println!("{output}");
        println!("Test {} completed.", test);
        // A brief pause to let the system settle before the next test.
        thread::sleep(Duration::from_secs(2));
    }

    // Cleanup the test file on the target.
    // fio created the file at {benchmark_dir}/iobench_testfile, so remove it there
    // rather than in the remote shell's working directory.
    println!("--------------------------------------------------");
    println!("Cleaning up test file on target...");
    let cleanup_command = format!("rm -f {}/iobench_testfile", args.benchmark_dir);
    if let Err(e) = run_command(&args.target, &cleanup_command) {
        eprintln!("Warning: Failed to clean up test file on target: {}", e);
    } else {
        println!("Cleanup successful.");
    }

    if results.is_empty() {
        println!("\nNo benchmark results to display.");
        return Ok(());
    }

    // Output results to a CSV file for easy analysis
    let csv_path = format!("{}/summary.csv", output_dir);
    let mut wtr = csv::Writer::from_path(&csv_path)?;
    for result in &results {
        wtr.serialize(result)?;
    }
    wtr.flush()?;

    println!("\nBenchmark summary saved to {}", csv_path);
    println!("\n--- Benchmark Results Summary ---");
    println!("{:<25} {:>10} {:>18} {:>20} {:>22}", "Test Name", "IOPS", "Bandwidth (KiB/s)", "Latency Mean (ms)", "Latency StdDev (ms)");
    println!("{:-<98}", "");
    for result in results {
        println!("{:<25} {:>10.2} {:>18.2} {:>20.4} {:>22.4}", result.test_name, result.iops, result.bandwidth_kibps, result.latency_mean_ms, result.latency_stddev_ms);
    }

    Ok(())
}

fn run_command(target: &str, command: &str) -> io::Result<String> {
    let (program, args) = if target == "localhost" {
        ("sudo", vec!["sh".to_string(), "-c".to_string(), command.to_string()])
    } else if target.starts_with("ssh/") {
        let target_str = target.strip_prefix("ssh/").unwrap();
        let ssh_target;
        let mut ssh_args = vec!["-o".to_string(), "StrictHostKeyChecking=no".to_string()];
        let port_parts: Vec<&str> = target_str.split(':').collect();
        if port_parts.len() == 2 {
            ssh_target = port_parts[0].to_string();
            ssh_args.push("-p".to_string());
            ssh_args.push(port_parts[1].to_string());
        } else {
            ssh_target = target_str.to_string();
        }
        ssh_args.push(ssh_target);
        ssh_args.push(format!("sudo sh -c '{}'", command));
        ("ssh", ssh_args)
    } else if target.starts_with("k8s/") {
        let parts: Vec<&str> = target.strip_prefix("k8s/").unwrap().split('/').collect();
        if parts.len() != 2 {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid k8s target format. Expected k8s/{namespace}/{pod}"));
        }
        let namespace = parts[0];
        let pod = parts[1];
        ("kubectl", vec!["exec".to_string(), "-n".to_string(), namespace.to_string(), pod.to_string(), "--".to_string(), "sh".to_string(), "-c".to_string(), command.to_string()])
    } else {
        return Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid target format"));
    };

    let mut cmd = Command::new(program);
    cmd.args(&args);
    cmd.stdout(Stdio::piped()).stderr(Stdio::piped());

    let child = cmd.spawn()?;
    let output = child.wait_with_output()?;

    if !output.status.success() {
        eprintln!("Command failed with status: {}", output.status);
        io::stderr().write_all(&output.stderr)?;
        return Err(io::Error::new(io::ErrorKind::Other, "Command execution failed"));
    }

    String::from_utf8(output.stdout)
        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}

fn parse_fio_output(output: &str, test_name: &str, rw: &str) -> Result<BenchmarkResult, String> {
    let fio_data: FioOutput = serde_json::from_str(output)
        .map_err(|e| format!("Failed to deserialize fio JSON: {}", e))?;

    let job_result = fio_data.jobs.iter()
        .find(|j| j.jobname == test_name)
        .ok_or_else(|| format!("Could not find job result for '{}' in fio output", test_name))?;

    let metrics = if rw.contains("read") {
        &job_result.read
    } else {
        &job_result.write
    };

    Ok(BenchmarkResult {
        test_name: test_name.to_string(),
        iops: metrics.iops,
        bandwidth_kibps: metrics.bw,
        latency_mean_ms: metrics.clat_ns.mean / 1_000_000.0,
        latency_stddev_ms: metrics.clat_ns.stddev / 1_000_000.0,
    })
}
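For reference, a local or SSH run might look like the sketch below; the flags come from the clap definitions above, while the host name and paths are placeholders:

```bash
# Build the benchmark binary.
cargo build --release

# Run against the local machine (fio must be installed; commands are executed via sudo).
./target/release/iobench --target localhost --benchmark-dir /tmp --duration 15

# Or against a remote host over SSH (placeholder user/host).
./target/release/iobench --target ssh/admin@storage-node-1 --benchmark-dir /mnt/ceph --block-size 4k --size 1G
```

Each run writes its per-test results to `{output_dir}/summary.csv` (default `./iobench-{datetime}/summary.csv`).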