parallelize runs
src/main.py
@@ -1,9 +1,12 @@
 #!/bin/python3
 import random
+import os
+import time
 from heapq import heappush, heappop
 import matplotlib.pyplot as plt
 from scipy.stats import t
 import numpy as np
+from multiprocessing import Pool
 
 class Event:
     def __init__(self, event_type, request):
@@ -131,10 +134,26 @@ class Simulation:
 
 
 
+def run_single_simulation(args):
+    c, lambda_val, simulation_time = args
+    # for different seed in each process
+    random.seed(time.time() + os.getpid())
+    try:
+        sim = Simulation(c, lambda_val)
+        sim.run(simulation_time)
+        if len(sim.response_times) > 0:
+            run_mean = sum(sim.response_times) / len(sim.response_times)
+            loss_rate = sim.loss_rate
+            return (run_mean, loss_rate)
+        else:
+            return None
+    except ValueError:  # Loss rate too high
+        return None
+
 
 def simulation_wrapper():
     C_values = [1, 2, 3, 6]
     simulation_time = 1000
-    num_runs = 10
+    num_runs = 12
     min_runs = 5
     confidence_level = 0.95
@@ -142,62 +161,55 @@ def simulation_wrapper():
 
     plt.figure(figsize=(12, 8))
 
-    for c in C_values:
-        lambda_points = []
-        means = []
-        ci_lower = []
-        ci_upper = []
-        print(f"\nProcessing C={c}")
-
-        for lambda_val in lambda_vals:
-            run_results = []
-            loss_rates = []
-
-            # run num_runs simulation for each lambda
-            for _ in range(num_runs):
-                try:
-                    sim = Simulation(c, lambda_val)
-                    sim.run(simulation_time)
-
-                    if len(sim.response_times) > 0:
-                        run_mean = sum(sim.response_times)/len(sim.response_times)
-                        run_results.append(run_mean)
-                        loss_rates.append(sim.loss_rate)
-
-                except ValueError: # lossrate too high
-                    continue
-
-
-            # reject if not enough successful run
-            if len(run_results) >= min_runs:
-                # statistics
-                mean_rt = np.mean(run_results)
-                std_dev = np.std(run_results, ddof=1)
-                n = len(run_results)
-
-                # confidence interval
-                t_value = t.ppf((1 + confidence_level)/2, n-1)
-                ci = t_value * std_dev / np.sqrt(n)
-
-                # loss rate
-                mean_loss = np.mean(loss_rates)
-
-                # store results
-                lambda_points.append(lambda_val)
-                means.append(mean_rt)
-                ci_lower.append(mean_rt - ci)
-                ci_upper.append(mean_rt + ci)
-
-                print(f"C={c}, λ={lambda_val:.2f}, Mean RT={mean_rt:.2f} ± {ci:.2f}, Loss Rate={mean_loss:.2%}")
-            elif len(run_results) > 0:
-                print(f"λ={lambda_val:.2f} skipped - only {len(run_results)} successful run(s)")
-                continue
-            else:
-                print(f"Stopped at λ={lambda_val:.2f} - no successful run")
-                break
-
-        plt.plot(lambda_points, means, label=f'C={c}')
-        plt.fill_between(lambda_points, ci_lower, ci_upper, alpha=0.2)
+    with Pool() as pool: # pool of workers
+        for c in C_values:
+            lambda_points = []
+            means = []
+            ci_lower = []
+            ci_upper = []
+            print(f"\nProcessing C={c}")
+
+            for lambda_val in lambda_vals:
+                # run num_runs simulation for each lambda
+                args_list = [(c, lambda_val, simulation_time) for _ in range(num_runs)]
+                results = pool.map(run_single_simulation, args_list)
+
+                # collect results from successful simulations
+                successful_results = [res for res in results if res is not None]
+                run_results = [res[0] for res in successful_results]
+                loss_rates = [res[1] for res in successful_results]
+
+                # reject if not enough successful run
+                if len(run_results) >= min_runs:
+                    # statistics
+                    mean_rt = np.mean(run_results)
+                    std_dev = np.std(run_results, ddof=1)
+                    n = len(run_results)
+
+                    # confidence interval
+                    t_value = t.ppf((1 + confidence_level)/2, n-1)
+                    ci = t_value * std_dev / np.sqrt(n)
+
+                    # loss rate
+                    mean_loss = np.mean(loss_rates)
+
+                    # store results
+                    lambda_points.append(lambda_val)
+                    means.append(mean_rt)
+                    ci_lower.append(mean_rt - ci)
+                    ci_upper.append(mean_rt + ci)
+
+                    print(f"C={c}, λ={lambda_val:.2f}, Mean RT={mean_rt:.2f} ± {ci:.2f}, Loss Rate={mean_loss:.2%}")
+                elif len(run_results) > 0:
+                    print(f"λ={lambda_val:.2f} skipped - only {len(run_results)} successful run(s)")
+                    continue
+                else:
+                    print(f"Stopped at λ={lambda_val:.2f} - no successful run")
+                    break
+
+
+            plt.plot(lambda_points, means, label=f'C={c}')
+            plt.fill_between(lambda_points, ci_lower, ci_upper, alpha=0.2)
 
     plt.xlabel('Arrival Rate (λ)')
     plt.ylabel('Mean Response Time')
@@ -207,5 +219,6 @@ def simulation_wrapper():
     plt.show()
 
 
-
+if __name__ == '__main__':
+    simulation_wrapper()
 
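For context on the pattern this commit adopts, the snippet below is a minimal, self-contained sketch, not part of src/main.py: fake_run and its hard-coded (c, lambda, time) values are assumptions standing in for run_single_simulation and the real parameters. It shows how Pool.map fans one argument tuple out per run, why each worker reseeds its RNG with the process id, and why the __main__ guard matters once multiprocessing is involved (child processes re-import the module).

import os
import random
import time
from multiprocessing import Pool

def fake_run(args):
    """Hypothetical stand-in for run_single_simulation: one independent run per call."""
    c, lambda_val, simulation_time = args
    # reseed per process so workers do not all inherit the same RNG state
    random.seed(time.time() + os.getpid())
    # pretend result: (mean response time, loss rate); None would mean a rejected run
    return (random.expovariate(1.0 / (c + lambda_val)), 0.0)

if __name__ == '__main__':  # required so spawned children don't re-execute the driver code
    args_list = [(2, 0.5, 1000) for _ in range(12)]   # mirrors num_runs = 12 in the commit
    with Pool() as pool:                              # defaults to os.cpu_count() workers
        results = pool.map(fake_run, args_list)       # blocks until every run returns
    successful = [r for r in results if r is not None]
    print(len(successful), "successful runs")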