diff --git a/iobench/dash/iobench-dash-v1.py b/iobench/dash/iobench-dash-v1.py
deleted file mode 100644
index 7a47620..0000000
--- a/iobench/dash/iobench-dash-v1.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from dash import Dash, dcc, html, Input, Output
-import plotly.graph_objects as go
-import pandas as pd
-
-# Load the CSV data
-df = pd.read_csv("iobench.csv")  # Replace with the actual file path
-
-# Initialize Dash app
-app = Dash(__name__)
-
-# Layout
-app.layout = html.Div(
-    [
-        html.H1("IOBench Results Viewer", style={"textAlign": "center"}),
-
-        # Filters
-        html.Div(
-            [
-                html.Label("Filter by Label:"),
-                dcc.Dropdown(
-                    id="label-filter",
-                    options=[{"label": label, "value": label} for label in df["label"].unique()],
-                    value=df["label"].unique().tolist(),
-                    multi=True,
-                ),
-                html.Label("Filter by Test Name:"),
-                dcc.Dropdown(
-                    id="test-filter",
-                    options=[{"label": test, "value": test} for test in df["test_name"].unique()],
-                    value=df["test_name"].unique().tolist(),
-                    multi=True,
-                ),
-            ],
-            style={"width": "25%", "display": "inline-block", "verticalAlign": "top", "padding": "10px"},
-        ),
-
-        # Graphs
-        html.Div(
-            [
-                dcc.Graph(id="throughput-graph"),
-                dcc.Graph(id="latency-graph"),
-            ],
-            style={"width": "70%", "display": "inline-block", "padding": "10px"},
-        ),
-    ]
-)
-
-# Callbacks
-@app.callback(
-    [Output("throughput-graph", "figure"), Output("latency-graph", "figure")],
-    [Input("label-filter", "value"), Input("test-filter", "value")],
-)
-def update_graphs(selected_labels, selected_tests):
-    # Filter data
-    filtered_df = df[df["label"].isin(selected_labels) & df["test_name"].isin(selected_tests)]
-
-    # Throughput Graph
-    throughput_fig = go.Figure()
-    for label in filtered_df["label"].unique():
-        subset = filtered_df[filtered_df["label"] == label]
-        throughput_fig.add_trace(
-            go.Bar(
-                x=subset["test_name"],
-                y=subset["iops"],
-                name=f"{label} - IOPS",
-            )
-        )
-        throughput_fig.add_trace(
-            go.Bar(
-                x=subset["test_name"],
-                y=subset["bandwidth_kibps"],
-                name=f"{label} - Bandwidth (KiB/s)",
-            )
-        )
-    throughput_fig.update_layout(
-        title="Throughput (IOPS and Bandwidth)",
-        xaxis_title="Test Name",
-        yaxis_title="Value",
-        barmode="group",
-    )
-
-    # Latency Graph
-    latency_fig = go.Figure()
-    for label in filtered_df["label"].unique():
-        subset = filtered_df[filtered_df["label"] == label]
-        latency_fig.add_trace(
-            go.Scatter(
-                x=subset["test_name"],
-                y=subset["latency_mean_ms"],
-                mode="markers+lines",
-                name=f"{label} - Latency Mean (ms)",
-                error_y=dict(
-                    type="data",
-                    array=subset["latency_stddev_ms"],
-                    visible=True,
-                ),
-            )
-        )
-    latency_fig.update_layout(
-        title="Latency with Standard Deviation",
-        xaxis_title="Test Name",
-        yaxis_title="Latency (ms)",
-    )
-
-    return throughput_fig, latency_fig
-
-
-if __name__ == "__main__":
-    app.run_server(debug=True)
diff --git a/iobench/dash/iobench-dash-v2.py b/iobench/dash/iobench-dash-v2.py
deleted file mode 100644
index 7b28947..0000000
--- a/iobench/dash/iobench-dash-v2.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import dash
-from dash import dcc, html, Input, Output
-import plotly.express as px
-import pandas as pd
-import dash_bootstrap_components as dbc
-import io
-import plotly.graph_objects as go  # Needed for the go.Figure() fallback in the callback
-
-# --- Sample Data ---
-# In a real-world scenario, you would load this from a CSV file.
-# For this self-contained example, we define the data directly.
-# Example: df = pd.read_csv('benchmark_data.csv')
-csv_data = """
-config,op_type,iops,latency_ms,throughput_mbs
-All-HDD,4k_random_read,260,60,1.02
-All-HDD,4k_random_write,100,150,0.39
-All-HDD,64k_sequential_read,2100,30,131.25
-All-HDD,64k_sequential_write,1500,42,93.75
-HDD+SSD_WAL,4k_random_read,270,58,1.05
-HDD+SSD_WAL,4k_random_write,160,100,0.62
-HDD+SSD_WAL,64k_sequential_read,2150,29,134.37
-HDD+SSD_WAL,64k_sequential_write,1800,35,112.5
-HDD+SSD_WAL_DB,4k_random_read,1250,12,4.88
-HDD+SSD_WAL_DB,4k_random_write,1550,10,6.05
-HDD+SSD_WAL_DB,64k_sequential_read,2200,28,137.5
-HDD+SSD_WAL_DB,64k_sequential_write,2000,32,125
-All-NVMe,4k_random_read,400000,0.1,1562.5
-All-NVMe,4k_random_write,350000,0.12,1367.18
-All-NVMe,64k_sequential_read,16000,4,1000
-All-NVMe,64k_sequential_write,12500,5,800
-"""
-
-# Read the data using pandas
-df = pd.read_csv(io.StringIO(csv_data))
-
-# Initialize the Dash app with a Bootstrap theme
-app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])
-
-# --- App Layout ---
-app.layout = dbc.Container([
-    # Header
-    dbc.Row([
-        dbc.Col([
-            html.H1("Ceph Cluster Benchmark Visualizer", className="text-primary"),
-            html.P(
-                "An interactive tool to compare performance metrics across different Ceph storage configurations.",
-                className="lead"
-            )
-        ])
-    ], className="my-4"),
-
-    # Controls and Graphs Row
-    dbc.Row([
-        # Control Panel Column
-        dbc.Col([
-            dbc.Card([
-                dbc.CardBody([
-                    html.H4("Control Panel", className="card-title"),
-                    html.Hr(),
-
-                    # Metric Selection Radio Buttons
-                    dbc.Label("Select Metric to Display:", html_for="metric-selector"),
-                    dcc.RadioItems(
-                        id='metric-selector',
-                        options=[
-                            {'label': 'IOPS (Input/Output Operations Per Second)', 'value': 'iops'},
-                            {'label': 'Latency (in Milliseconds)', 'value': 'latency_ms'},
-                            {'label': 'Throughput (in MB/s)', 'value': 'throughput_mbs'}
-                        ],
-                        value='iops',  # Default value
-                        labelClassName="d-block"  # Display labels as blocks
-                    ),
-                    html.Hr(),
-
-                    # Configuration Selection Checklist
-                    dbc.Label("Select Configurations to Compare:", html_for="config-checklist"),
-                    dcc.Checklist(
-                        id='config-checklist',
-                        options=[{'label': config, 'value': config} for config in df['config'].unique()],
-                        value=df['config'].unique(),  # Select all by default
-                        labelClassName="d-block"
-                    ),
-                ])
-            ], className="mb-4")
-        ], width=12, lg=4),  # Full width on small screens, 1/3 on large
-
-        # Graph Display Column
-        dbc.Col([
-            dcc.Graph(id='benchmark-graph')
-        ], width=12, lg=8)  # Full width on small screens, 2/3 on large
-    ])
-], fluid=True)  # Use a fluid container for full-width layout
-
-
-# --- Callback Function ---
-# This function connects the controls to the graph
-@app.callback(
-    Output('benchmark-graph', 'figure'),
-    [Input('metric-selector', 'value'),
-     Input('config-checklist', 'value')]
-)
-def update_graph(selected_metric, selected_configs):
-    """
-    This function is triggered whenever a control's value changes.
-    It filters the dataframe and returns an updated bar chart figure.
- """ - if not selected_configs: - # If no configs are selected, return an empty figure to avoid errors - return go.Figure().update_layout( - title="Please select a configuration to view data.", - xaxis_title="", - yaxis_title="" - ) - - # Filter the DataFrame based on the selected configurations - filtered_df = df[df['config'].isin(selected_configs)] - - # Create the bar chart using Plotly Express - fig = px.bar( - filtered_df, - x='op_type', - y=selected_metric, - color='config', - barmode='group', # Group bars for different configs side-by-side - labels={ - "op_type": "Benchmark Operation Type", - "iops": "IOPS (Higher is Better)", - "latency_ms": "Latency in ms (Lower is Better)", - "throughput_mbs": "Throughput in MB/s (Higher is Better)", - "config": "Storage Configuration" - }, - title=f"Benchmark Comparison for: {selected_metric.replace('_', ' ').title()}", - height=600 # Set a fixed height for the graph - ) - - # Update layout for better readability - fig.update_layout( - xaxis_title="Operation Type", - yaxis_title=selected_metric.replace('_', ' ').title(), - legend_title="Configuration", - title_x=0.5, # Center the title - xaxis={'categoryorder':'total descending' if selected_metric != 'latency_ms' else 'total ascending'} - ) - - return fig - -# --- Run the App --- -if __name__ == '__main__': - # Use debug=True for development, allowing hot-reloading - app.run(debug=True) diff --git a/iobench/dash/iobench-dash-v3.py b/iobench/dash/iobench-dash-v3.py deleted file mode 100644 index 4c68712..0000000 --- a/iobench/dash/iobench-dash-v3.py +++ /dev/null @@ -1,175 +0,0 @@ -import dash -from dash import dcc, html, Input, Output -import plotly.express as px -import pandas as pd -import dash_bootstrap_components as dbc -import io -import plotly.graph_objects as go - -# --- Data Loading and Preparation --- -# 1. Use the exact iobench csv output format provided. 
-csv_data = """label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
-Ceph HDD Only,read-4k-sync-test,1474.302,5897,0.673,0.591
-Ceph HDD Only,write-4k-sync-test,14.126,56,27.074,7.046
-Ceph HDD Only,randread-4k-sync-test,225.140,900,4.436,6.918
-Ceph HDD Only,randwrite-4k-sync-test,13.129,52,34.891,10.859
-Ceph HDD Only,multiread-4k-sync-test,6873.675,27494,0.578,0.764
-Ceph HDD Only,multiwrite-4k-sync-test,57.135,228,38.660,11.293
-Ceph HDD Only,multirandread-4k-sync-test,2451.376,9805,1.626,2.515
-Ceph HDD Only,multirandwrite-4k-sync-test,54.642,218,33.492,13.111
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,read-4k-sync-test,1495.700,5982,0.664,1.701
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,write-4k-sync-test,16.990,67,17.502,9.908
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randread-4k-sync-test,159.256,637,6.274,9.232
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,randwrite-4k-sync-test,16.693,66,24.094,16.099
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiread-4k-sync-test,7305.559,29222,0.544,1.338
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multiwrite-4k-sync-test,52.260,209,34.891,17.576
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandread-4k-sync-test,700.606,2802,5.700,10.429
-Ceph 2 Hosts WAL+DB SSD and 1 Host HDD,multirandwrite-4k-sync-test,52.723,210,29.709,25.829
-Ceph 2 Hosts WAL+DB SSD Only,randwrite-4k-sync-test,90.037,360,3.617,8.321
-Ceph WAL+DB SSD During Rebuild,randwrite-4k-sync-test,41.008,164,10.138,19.333
-Ceph WAL+DB SSD OSD HDD,read-4k-sync-test,1520.299,6081,0.654,1.539
-Ceph WAL+DB SSD OSD HDD,write-4k-sync-test,78.528,314,4.074,9.101
-Ceph WAL+DB SSD OSD HDD,randread-4k-sync-test,153.303,613,6.518,9.036
-Ceph WAL+DB SSD OSD HDD,randwrite-4k-sync-test,48.677,194,8.785,20.356
-Ceph WAL+DB SSD OSD HDD,multiread-4k-sync-test,6804.880,27219,0.584,1.422
-Ceph WAL+DB SSD OSD HDD,multiwrite-4k-sync-test,311.513,1246,4.978,9.458
-Ceph WAL+DB SSD OSD HDD,multirandread-4k-sync-test,581.756,2327,6.869,10.204
-Ceph WAL+DB SSD OSD HDD,multirandwrite-4k-sync-test,120.556,482,13.463,25.440
-"""
-
-# Read the data and create a more user-friendly bandwidth column in MB/s
-df = pd.read_csv(io.StringIO(csv_data))
-df['bandwidth_mbps'] = df['bandwidth_kibps'] / 1024
-
-# --- App Initialization and Global Settings ---
-app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])
-
-# 3. Create a consistent color map for each unique label (cluster topology).
-unique_labels = df['label'].unique()
-color_map = {label: color for label, color in zip(unique_labels, px.colors.qualitative.Plotly)}
-
-# --- App Layout ---
-app.layout = dbc.Container([
-    # Header
-    dbc.Row([
-        dbc.Col([
-            html.H1("Ceph iobench Performance Dashboard", className="text-primary"),
-            html.P(
-                "Compare benchmark results across different Ceph cluster configurations and metrics.",
-                className="lead"
-            )
-        ])
-    ], className="my-4"),
-
-    # Controls and Graphs Row
-    dbc.Row([
-        # Control Panel Column
-        dbc.Col([
-            dbc.Card([
-                dbc.CardBody([
-                    html.H4("Control Panel", className="card-title"),
-                    html.Hr(),
-
-                    # 2. Metric Selection Checklist to view multiple graphs
-                    dbc.Label("Select Metrics to Display:", html_for="metric-checklist", className="fw-bold"),
-                    dcc.Checklist(
-                        id='metric-checklist',
-                        options=[
-                            {'label': 'IOPS', 'value': 'iops'},
-                            {'label': 'Latency (ms)', 'value': 'latency_mean_ms'},
-                            {'label': 'Bandwidth (MB/s)', 'value': 'bandwidth_mbps'}
-                        ],
-                        value=['iops', 'latency_mean_ms'],  # Default selection
-                        labelClassName="d-block"
-                    ),
-                    html.Hr(),
-
-                    # Configuration Selection Checklist
-                    dbc.Label("Select Configurations to Compare:", html_for="config-checklist", className="fw-bold"),
-                    dcc.Checklist(
-                        id='config-checklist',
-                        options=[{'label': label, 'value': label} for label in unique_labels],
-                        value=unique_labels,  # Select all by default
-                        labelClassName="d-block"
-                    ),
-                ])
-            ], className="mb-4")
-        ], width=12, lg=4),
-
-        # Graph Display Column - This will be populated by the callback
-        dbc.Col(id='graph-container', width=12, lg=8)
-    ])
-], fluid=True)
-
-
-# --- Callback Function ---
-@app.callback(
-    Output('graph-container', 'children'),
-    [Input('metric-checklist', 'value'),
-     Input('config-checklist', 'value')]
-)
-def update_graphs(selected_metrics, selected_configs):
-    """
-    This function is triggered when a control's value changes.
-    It generates and returns a list of graphs based on user selections.
-    """
-    # Handle cases where no selection is made to prevent errors
-    if not selected_metrics or not selected_configs:
-        return dbc.Alert("Please select at least one metric and one configuration to view data.", color="info")
-
-    # Filter the DataFrame based on the selected configurations
-    filtered_df = df[df['label'].isin(selected_configs)]
-
-    # Create a list to hold all the graph components
-    graph_list = []
-
-    # Define user-friendly titles for graphs
-    metric_titles = {
-        'iops': 'IOPS Comparison (Higher is Better)',
-        'latency_mean_ms': 'Mean Latency (ms) Comparison (Lower is Better)',
-        'bandwidth_mbps': 'Bandwidth (MB/s) Comparison (Higher is Better)'
-    }
-
-    # Loop through each selected metric and create a graph for it
-    for metric in selected_metrics:
-        # Determine if sorting should be ascending (for latency) or descending
-        sort_order = 'total ascending' if metric == 'latency_mean_ms' else 'total descending'
-
-        # Special handling for latency to include error bars for standard deviation
-        error_y_param = 'latency_stddev_ms' if metric == 'latency_mean_ms' else None
-
-        fig = px.bar(
-            filtered_df,
-            x='test_name',
-            y=metric,
-            color='label',
-            barmode='group',
-            color_discrete_map=color_map,  # 3. Apply the consistent color map
-            error_y=error_y_param,  # Adds error bars for latency stddev
-            title=metric_titles.get(metric, metric),
-            labels={
-                "test_name": "Benchmark Test Name",
-                "iops": "IOPS",
-                "latency_mean_ms": "Mean Latency (ms)",
-                "bandwidth_mbps": "Bandwidth (MB/s)",
-                "label": "Cluster Configuration"
-            }
-        )
-
-        fig.update_layout(
-            height=500,
-            xaxis_title=None,  # Clean up x-axis title
-            legend_title="Configuration",
-            title_x=0.5,  # Center the title
-            xaxis={'categoryorder': sort_order},
-            xaxis_tickangle=-45  # Angle labels to prevent overlap
-        )
-
-        # Add the generated graph to our list, wrapped in a column for layout
-        graph_list.append(dbc.Row(dbc.Col(dcc.Graph(figure=fig)), className="mb-4"))

-    return graph_list
-
-# --- Run the App ---
-if __name__ == '__main__':
-    app.run(debug=True)
diff --git a/iobench/dash/iobench-dash-v4.py b/iobench/dash/iobench-dash.py
similarity index 100%
rename from iobench/dash/iobench-dash-v4.py
rename to iobench/dash/iobench-dash.py
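The renamed iobench-dash.py is carried over unchanged by the 100% similarity rename, so its contents do not appear in this diff. For context, v1 read iobench.csv from the working directory while v2 and v3 inlined sample data. Below is a minimal sketch of loading real iobench output from disk instead, assuming the CSV uses the v3 column layout (label, test_name, iops, bandwidth_kibps, latency_mean_ms, latency_stddev_ms); the load_results helper and the --csv flag are illustrative assumptions, not part of the actual script.

import argparse

import pandas as pd


def load_results(path: str) -> pd.DataFrame:
    # Hypothetical helper: read iobench CSV output from a file instead of an
    # inline string. Columns are assumed to match the v3 sample data:
    # label,test_name,iops,bandwidth_kibps,latency_mean_ms,latency_stddev_ms
    df = pd.read_csv(path)
    # Derive a MB/s bandwidth column, as iobench-dash-v3.py did for its data
    df["bandwidth_mbps"] = df["bandwidth_kibps"] / 1024
    return df


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Load iobench CSV results")
    # --csv is an assumed flag, not one the dashboards above define
    parser.add_argument("--csv", default="iobench.csv", help="Path to iobench CSV output")
    args = parser.parse_args()
    print(load_results(args.csv).head())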