forked from NationTech/harmony
feat: Add iobench project and python dashboard
149
iobench/dash/iobench-dash-v2.py
Normal file
@@ -0,0 +1,149 @@
import dash
from dash import dcc, html, Input, Output
import plotly.express as px
import plotly.graph_objects as go  # needed for the empty-figure fallback in the callback
import pandas as pd
import dash_bootstrap_components as dbc
import io
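
# All dependencies are plain PyPI packages; a typical setup (an assumption
# about the environment, not part of the original commit) would be:
#   pip install dash dash-bootstrap-components pandas plotly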

# --- Sample Data ---
# In a real-world scenario, you would load this from a CSV file.
# For this self-contained example, we define the data directly.
# Example: df = pd.read_csv('benchmark_data.csv')
csv_data = """
config,op_type,iops,latency_ms,throughput_mbs
All-HDD,4k_random_read,260,60,1.02
All-HDD,4k_random_write,100,150,0.39
All-HDD,64k_sequential_read,2100,30,131.25
All-HDD,64k_sequential_write,1500,42,93.75
HDD+SSD_WAL,4k_random_read,270,58,1.05
HDD+SSD_WAL,4k_random_write,160,100,0.62
HDD+SSD_WAL,64k_sequential_read,2150,29,134.37
HDD+SSD_WAL,64k_sequential_write,1800,35,112.5
HDD+SSD_WAL_DB,4k_random_read,1250,12,4.88
HDD+SSD_WAL_DB,4k_random_write,1550,10,6.05
HDD+SSD_WAL_DB,64k_sequential_read,2200,28,137.5
HDD+SSD_WAL_DB,64k_sequential_write,2000,32,125
All-NVMe,4k_random_read,400000,0.1,1562.5
All-NVMe,4k_random_write,350000,0.12,1367.18
All-NVMe,64k_sequential_read,16000,4,1000
All-NVMe,64k_sequential_write,12500,5,800
"""

# Read the data using pandas
df = pd.read_csv(io.StringIO(csv_data))
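
# A minimal sketch of preferring a real results file over the embedded sample.
# The filename 'benchmark_data.csv' is only an illustration (it mirrors the
# example in the comment above); it is not a file iobench is known to write.
#
#   from pathlib import Path
#   results = Path('benchmark_data.csv')
#   if results.exists():
#       df = pd.read_csv(results)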

# Initialize the Dash app with a Bootstrap theme
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])
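# dbc.themes exposes the other Bootswatch themes as well (e.g. dbc.themes.DARKLY),
# any of which can be swapped in here.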

# --- App Layout ---
app.layout = dbc.Container([
    # Header
    dbc.Row([
        dbc.Col([
            html.H1("Ceph Cluster Benchmark Visualizer", className="text-primary"),
            html.P(
                "An interactive tool to compare performance metrics across different Ceph storage configurations.",
                className="lead"
            )
        ])
    ], className="my-4"),

    # Controls and Graphs Row
    dbc.Row([
        # Control Panel Column
        dbc.Col([
            dbc.Card([
                dbc.CardBody([
                    html.H4("Control Panel", className="card-title"),
                    html.Hr(),

                    # Metric Selection Radio Buttons
                    dbc.Label("Select Metric to Display:", html_for="metric-selector"),
                    dcc.RadioItems(
                        id='metric-selector',
                        options=[
                            {'label': 'IOPS (Input/Output Operations Per Second)', 'value': 'iops'},
                            {'label': 'Latency (in Milliseconds)', 'value': 'latency_ms'},
                            {'label': 'Throughput (in MB/s)', 'value': 'throughput_mbs'}
                        ],
                        value='iops',  # Default value
                        labelClassName="d-block"  # Display labels as blocks
                    ),
                    html.Hr(),

                    # Configuration Selection Checklist
                    dbc.Label("Select Configurations to Compare:", html_for="config-checklist"),
                    dcc.Checklist(
                        id='config-checklist',
                        options=[{'label': config, 'value': config} for config in df['config'].unique()],
                        value=list(df['config'].unique()),  # Select all by default
                        labelClassName="d-block"
                    ),
                ])
            ], className="mb-4")
        ], width=12, lg=4),  # Full width on small screens, 1/3 on large

        # Graph Display Column
        dbc.Col([
            dcc.Graph(id='benchmark-graph')
        ], width=12, lg=8)  # Full width on small screens, 2/3 on large
    ])
], fluid=True)  # Use a fluid container for full-width layout


# --- Callback Function ---
# This function connects the controls to the graph
@app.callback(
    Output('benchmark-graph', 'figure'),
    [Input('metric-selector', 'value'),
     Input('config-checklist', 'value')]
)
def update_graph(selected_metric, selected_configs):
    """
    This function is triggered whenever a control's value changes.
    It filters the dataframe and returns an updated bar chart figure.
    """
    if not selected_configs:
        # If no configs are selected, return an empty figure to avoid errors
        return go.Figure().update_layout(
            title="Please select a configuration to view data.",
            xaxis_title="",
            yaxis_title=""
        )

    # Filter the DataFrame based on the selected configurations
    filtered_df = df[df['config'].isin(selected_configs)]

    # Create the bar chart using Plotly Express
    fig = px.bar(
        filtered_df,
        x='op_type',
        y=selected_metric,
        color='config',
        barmode='group',  # Group bars for different configs side-by-side
        labels={
            "op_type": "Benchmark Operation Type",
            "iops": "IOPS (Higher is Better)",
            "latency_ms": "Latency in ms (Lower is Better)",
            "throughput_mbs": "Throughput in MB/s (Higher is Better)",
            "config": "Storage Configuration"
        },
        title=f"Benchmark Comparison for: {selected_metric.replace('_', ' ').title()}",
        height=600  # Set a fixed height for the graph
    )

    # Update layout for better readability
    fig.update_layout(
        xaxis_title="Operation Type",
        yaxis_title=selected_metric.replace('_', ' ').title(),
        legend_title="Configuration",
        title_x=0.5,  # Center the title
        xaxis={'categoryorder': 'total descending' if selected_metric != 'latency_ms' else 'total ascending'}
    )

    return fig
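
# Note: iops in the sample data spans ~260 (All-HDD) to ~400,000 (All-NVMe),
# so the HDD bars are nearly invisible on a linear axis. A log scale is one
# way to keep them readable, e.g. inside update_graph:
#
#   fig.update_yaxes(type='log')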

# --- Run the App ---
if __name__ == '__main__':
    # Use debug=True for development, allowing hot-reloading
    app.run(debug=True)
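
# To try the dashboard locally:
#   python iobench-dash-v2.py
# then open http://127.0.0.1:8050 (Dash's default address) in a browser.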