forked from NationTech/harmony
feat: Add iobench project and python dashboard
iobench/dash/iobench-dash-v4.py (229 lines, new file)
@@ -0,0 +1,229 @@
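# Interactive dashboard for comparing iobench results across Ceph cluster
# configurations: loads summary rows from a CSV and renders grouped bar
# charts for IOPS, mean latency, and bandwidth.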
import dash
from dash import dcc, html, Input, Output, State, clientside_callback, ClientsideFunction
import plotly.express as px
import pandas as pd
import dash_bootstrap_components as dbc
import io

# --- Data Loading and Preparation ---
# csv_data = """label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
# Ceph HDD Only,read-4k-sync-test,1474.302,5897,0.673,0.591
# Ceph HDD Only,write-4k-sync-test,14.126,56,27.074,7.046
# Ceph HDD Only,randread-4k-sync-test,225.140,900,4.436,6.918
# Ceph HDD Only,randwrite-4k-sync-test,13.129,52,34.891,10.859
# Ceph HDD Only,multiread-4k-sync-test,6873.675,27494,0.578,0.764
# Ceph HDD Only,multiwrite-4k-sync-test,57.135,228,38.660,11.293
# Ceph HDD Only,multirandread-4k-sync-test,2451.376,9805,1.626,2.515
# Ceph HDD Only,multirandwrite-4k-sync-test,54.642,218,33.492,13.111
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,read-4k-sync-test,1495.700,5982,0.664,1.701
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,write-4k-sync-test,16.990,67,17.502,9.908
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randread-4k-sync-test,159.256,637,6.274,9.232
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randwrite-4k-sync-test,16.693,66,24.094,16.099
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiread-4k-sync-test,7305.559,29222,0.544,1.338
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiwrite-4k-sync-test,52.260,209,34.891,17.576
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandread-4k-sync-test,700.606,2802,5.700,10.429
# Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandwrite-4k-sync-test,52.723,210,29.709,25.829
# Ceph 2 Hosts WAL+DB SSD Only,randwrite-4k-sync-test,90.037,360,3.617,8.321
# Ceph WAL+DB SSD During Rebuild,randwrite-4k-sync-test,41.008,164,10.138,19.333
# Ceph WAL+DB SSD OSD HDD,read-4k-sync-test,1520.299,6081,0.654,1.539
# Ceph WAL+DB SSD OSD HDD,write-4k-sync-test,78.528,314,4.074,9.101
# Ceph WAL+DB SSD OSD HDD,randread-4k-sync-test,153.303,613,6.518,9.036
# Ceph WAL+DB SSD OSD HDD,randwrite-4k-sync-test,48.677,194,8.785,20.356
# Ceph WAL+DB SSD OSD HDD,multiread-4k-sync-test,6804.880,27219,0.584,1.422
# Ceph WAL+DB SSD OSD HDD,multiwrite-4k-sync-test,311.513,1246,4.978,9.458
# Ceph WAL+DB SSD OSD HDD,multirandread-4k-sync-test,581.756,2327,6.869,10.204
# Ceph WAL+DB SSD OSD HDD,multirandwrite-4k-sync-test,120.556,482,13.463,25.440
# """
#
# df = pd.read_csv(io.StringIO(csv_data))
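# The commented-out sample above documents the expected CSV schema; to run
# without an external file, uncomment csv_data and the StringIO line.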
df = pd.read_csv("iobench.csv") # Replace with the actual file path
df['bandwidth_mbps'] = df['bandwidth_kibps'] / 1024  # KiB/s -> MiB/s (shown as MB/s in the charts)

# --- App Initialization and Global Settings ---
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])

# Create master lists of options for checklists
unique_labels = sorted(df['label'].unique())
unique_tests = sorted(df['test_name'].unique())

# Create a consistent color map for each unique label
color_map = {label: color for label, color in zip(unique_labels, px.colors.qualitative.Plotly)}
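# Note: px.colors.qualitative.Plotly has 10 entries and zip() truncates, so
# with more than 10 configurations the extras fall back to Plotly's defaults.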

# --- App Layout ---
app.layout = dbc.Container([
    # Header
    dbc.Row(dbc.Col(html.H1("Ceph iobench Performance Dashboard", className="text-primary")), className="my-4 text-center"),

    # Controls and Graphs Row
    dbc.Row([
        # Control Panel Column
        dbc.Col([
            dbc.Card([
                dbc.CardBody([
                    html.H4("Control Panel", className="card-title"),
                    html.Hr(),

                    # Metric Selection
                    dbc.Label("1. Select Metrics to Display:", html_for="metric-checklist", className="fw-bold"),
                    dcc.Checklist(
                        id='metric-checklist',
                        options=[
                            {'label': 'IOPS', 'value': 'iops'},
                            {'label': 'Latency (ms)', 'value': 'latency_mean_ms'},
                            {'label': 'Bandwidth (MB/s)', 'value': 'bandwidth_mbps'}
                        ],
                        value=['iops', 'latency_mean_ms', 'bandwidth_mbps'],  # Default selection
                        labelClassName="d-block"
                    ),
                    html.Hr(),

                    # Configuration Selection
                    dbc.Label("2. Select Configurations:", html_for="config-checklist", className="fw-bold"),
                    dbc.ButtonGroup([
                        dbc.Button("All", id="config-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
                        dbc.Button("None", id="config-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
                    ], className="mb-2"),
                    dcc.Checklist(
                        id='config-checklist',
                        options=[{'label': label, 'value': label} for label in unique_labels],
                        value=unique_labels,  # Select all by default
                        labelClassName="d-block"
                    ),
                    html.Hr(),

                    # Test Name Selection
                    dbc.Label("3. Select Tests:", html_for="test-checklist", className="fw-bold"),
                    dbc.ButtonGroup([
                        dbc.Button("All", id="test-select-all", n_clicks=0, color="primary", outline=True, size="sm"),
                        dbc.Button("None", id="test-select-none", n_clicks=0, color="primary", outline=True, size="sm"),
                    ], className="mb-2"),
                    dcc.Checklist(
                        id='test-checklist',
                        options=[{'label': test, 'value': test} for test in unique_tests],
                        value=unique_tests,  # Select all by default
                        labelClassName="d-block"
                    ),
                ])
            ], className="mb-4")
        ], width=12, lg=4),

        # Graph Display Column
        dbc.Col(id='graph-container', width=12, lg=8)
    ])
], fluid=True)
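# 'graph-container' is left empty in the layout; the main callback below
# populates it with one dcc.Graph per selected metric.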

# --- Callbacks ---

# Callback to handle "Select All" / "Select None" for configurations
@app.callback(
    Output('config-checklist', 'value'),
    Input('config-select-all', 'n_clicks'),
    Input('config-select-none', 'n_clicks'),
    prevent_initial_call=True
)
def select_all_none_configs(all_clicks, none_clicks):
    # ctx.triggered[0]['prop_id'] is e.g. 'config-select-all.n_clicks';
    # splitting on '.' recovers the id of the button that fired.
    ctx = dash.callback_context
    if not ctx.triggered:
        return dash.no_update

    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if button_id == 'config-select-all':
        return unique_labels
    elif button_id == 'config-select-none':
        return []
    return dash.no_update

# Callback to handle "Select All" / "Select None" for tests
@app.callback(
    Output('test-checklist', 'value'),
    Input('test-select-all', 'n_clicks'),
    Input('test-select-none', 'n_clicks'),
    prevent_initial_call=True
)
def select_all_none_tests(all_clicks, none_clicks):
    ctx = dash.callback_context
    if not ctx.triggered:
        return dash.no_update

    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if button_id == 'test-select-all':
        return unique_tests
    elif button_id == 'test-select-none':
        return []
    return dash.no_update
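# Note: the two select-all/none callbacks are deliberately symmetric; if more
# checklists are added they could be folded into a single callback using
# Dash's pattern-matching ids (MATCH/ALL).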

# Main callback to update graphs based on all selections
@app.callback(
    Output('graph-container', 'children'),
    [Input('metric-checklist', 'value'),
     Input('config-checklist', 'value'),
     Input('test-checklist', 'value')]
)
def update_graphs(selected_metrics, selected_configs, selected_tests):
    """
    This function is triggered when any control's value changes.
    It generates and returns a list of graphs based on all user selections.
    """
    # Handle cases where no selection is made to prevent errors and show a helpful message
    if not all([selected_metrics, selected_configs, selected_tests]):
        return dbc.Alert(
            "Please select at least one item from each category (Metric, Configuration, and Test) to view data.",
            color="info",
            className="mt-4"
        )

    # Filter the DataFrame based on all selected criteria
    filtered_df = df[df['label'].isin(selected_configs) & df['test_name'].isin(selected_tests)]

    # If the filtered data is empty after selection, inform the user
    if filtered_df.empty:
        return dbc.Alert("No data available for the current selection.", color="warning", className="mt-4")

    graph_list = []
    metric_titles = {
        'iops': 'IOPS Comparison (Higher is Better)',
        'latency_mean_ms': 'Mean Latency (ms) Comparison (Lower is Better)',
        'bandwidth_mbps': 'Bandwidth (MB/s) Comparison (Higher is Better)'
    }

    for metric in selected_metrics:
        # Latency is better when low, so sort its bars the other way round;
        # stddev error bars are attached to the latency chart only.
        sort_order = 'total ascending' if metric == 'latency_mean_ms' else 'total descending'
        error_y_param = 'latency_stddev_ms' if metric == 'latency_mean_ms' else None

        fig = px.bar(
            filtered_df,
            x='test_name',
            y=metric,
            color='label',
            barmode='group',
            color_discrete_map=color_map,
            error_y=error_y_param,
            title=metric_titles.get(metric, metric),
            labels={
                "test_name": "Benchmark Test Name",
                "iops": "IOPS",
                "latency_mean_ms": "Mean Latency (ms)",
                "bandwidth_mbps": "Bandwidth (MB/s)",
                "label": "Cluster Configuration"
            }
        )

        fig.update_layout(
            height=500,
            xaxis_title=None,
            legend_title="Configuration",
            title_x=0.5,
            xaxis={'categoryorder': sort_order},
            xaxis_tickangle=-45,
            margin=dict(b=120)  # Add bottom margin to prevent tick labels from being cut off
        )

        graph_list.append(dbc.Row(dbc.Col(dcc.Graph(figure=fig)), className="mb-4"))

    return graph_list

# --- Run the App ---
if __name__ == '__main__':
    app.run(debug=True)
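# By default Dash serves on http://127.0.0.1:8050; pass host=/port= to
# app.run() to change where the dashboard listens.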