parallelize runs

This commit is contained in:
2025-03-24 16:59:32 +01:00
parent cea4edd073
commit 29ea7b229e

View File

@@ -1,9 +1,12 @@
#!/bin/python3
import random
import os
import time
from heapq import heappush, heappop
import matplotlib.pyplot as plt
from scipy.stats import t
import numpy as np
from multiprocessing import Pool
class Event:
def __init__(self, event_type, request):
@@ -131,10 +134,26 @@ class Simulation:
def run_single_simulation(args):
    """Run one queueing simulation and return its summary statistics.

    Parameters
    ----------
    args : tuple
        ``(c, lambda_val, simulation_time)`` — server count, arrival rate,
        and simulated duration, packed as a single tuple so this function
        can be passed directly to ``multiprocessing.Pool.map``.

    Returns
    -------
    tuple or None
        ``(mean_response_time, loss_rate)`` on success; ``None`` when the
        run recorded no response times or the simulation aborted with
        ``ValueError`` (loss rate too high).
    """
    c, lambda_val, simulation_time = args
    # Re-seed per worker: Pool workers can inherit identical RNG state from
    # the parent, so mix in the PID (plus wall clock) to de-correlate runs.
    random.seed(time.time() + os.getpid())
    # Keep the try block minimal: only the simulation itself is expected to
    # raise ValueError.  The original wrapped the result assembly too, which
    # would silently swallow unrelated ValueErrors as "loss rate too high".
    try:
        sim = Simulation(c, lambda_val)
        sim.run(simulation_time)
    except ValueError:  # simulation aborted: loss rate too high
        return None
    if not sim.response_times:
        return None
    run_mean = sum(sim.response_times) / len(sim.response_times)
    return (run_mean, sim.loss_rate)
def simulation_wrapper():
C_values = [1, 2, 3, 6]
simulation_time = 1000
num_runs = 10
num_runs = 12
min_runs = 5
confidence_level = 0.95
@@ -142,62 +161,55 @@ def simulation_wrapper():
plt.figure(figsize=(12, 8))
for c in C_values:
lambda_points = []
means = []
ci_lower = []
ci_upper = []
print(f"\nProcessing C={c}")
with Pool() as pool: # pool of workers
for c in C_values:
lambda_points = []
means = []
ci_lower = []
ci_upper = []
print(f"\nProcessing C={c}")
for lambda_val in lambda_vals:
run_results = []
loss_rates = []
for lambda_val in lambda_vals:
# run num_runs simulation for each lambda
args_list = [(c, lambda_val, simulation_time) for _ in range(num_runs)]
results = pool.map(run_single_simulation, args_list)
# run num_runs simulation for each lambda
for _ in range(num_runs):
try:
sim = Simulation(c, lambda_val)
sim.run(simulation_time)
# collect results from successful simulations
successful_results = [res for res in results if res is not None]
run_results = [res[0] for res in successful_results]
loss_rates = [res[1] for res in successful_results]
if len(sim.response_times) > 0:
run_mean = sum(sim.response_times)/len(sim.response_times)
run_results.append(run_mean)
loss_rates.append(sim.loss_rate)
# reject if not enough successful run
if len(run_results) >= min_runs:
# statistics
mean_rt = np.mean(run_results)
std_dev = np.std(run_results, ddof=1)
n = len(run_results)
except ValueError: # lossrate too high
# confidence interval
t_value = t.ppf((1 + confidence_level)/2, n-1)
ci = t_value * std_dev / np.sqrt(n)
# loss rate
mean_loss = np.mean(loss_rates)
# store results
lambda_points.append(lambda_val)
means.append(mean_rt)
ci_lower.append(mean_rt - ci)
ci_upper.append(mean_rt + ci)
print(f"C={c}, λ={lambda_val:.2f}, Mean RT={mean_rt:.2f} ± {ci:.2f}, Loss Rate={mean_loss:.2%}")
elif len(run_results) > 0:
print(f"λ={lambda_val:.2f} skipped - only {len(run_results)} successful run(s)")
continue
# reject if not enough successful run
if len(run_results) >= min_runs:
# statistics
mean_rt = np.mean(run_results)
std_dev = np.std(run_results, ddof=1)
n = len(run_results)
# confidence interval
t_value = t.ppf((1 + confidence_level)/2, n-1)
ci = t_value * std_dev / np.sqrt(n)
# loss rate
mean_loss = np.mean(loss_rates)
# store results
lambda_points.append(lambda_val)
means.append(mean_rt)
ci_lower.append(mean_rt - ci)
ci_upper.append(mean_rt + ci)
print(f"C={c}, λ={lambda_val:.2f}, Mean RT={mean_rt:.2f} ± {ci:.2f}, Loss Rate={mean_loss:.2%}")
elif len(run_results) > 0:
print(f"λ={lambda_val:.2f} skipped - only {len(run_results)} successful run(s)")
continue
else:
print(f"Stopped at λ={lambda_val:.2f} - no successful run")
break
else:
print(f"Stopped at λ={lambda_val:.2f} - no successful run")
break
plt.plot(lambda_points, means, label=f'C={c}')
plt.fill_between(lambda_points, ci_lower, ci_upper, alpha=0.2)
plt.plot(lambda_points, means, label=f'C={c}')
plt.fill_between(lambda_points, ci_lower, ci_upper, alpha=0.2)
plt.xlabel('Arrival Rate (λ)')
plt.ylabel('Mean Response Time')
@@ -207,5 +219,6 @@ def simulation_wrapper():
plt.show()
if __name__ == '__main__':
simulation_wrapper()