Commit a350e17a authored by harry1080ti

update something

parent fb06d64f
@@ -21,7 +21,8 @@ class cma_approach(object):
res_step = 1,
penalty_offest = 10000000000,
seeding_type="optimised",
hybird = True
hybird = True,
print_to_csv = True
):
self.target_col = target_col
self.start = time.time()
@@ -48,6 +49,7 @@ class cma_approach(object):
self.seeding_type = seeding_type
self.upper_level_mp = False
self.print_to_csv = print_to_csv
def parse_topology_file(self):
layers = []
@@ -277,26 +279,31 @@ class cma_approach(object):
res_combinations = [0] * self.es.popsize
if self.upper_level_mp:
for i in id_list: # 25% slower
result = self.evaluation_top_level(i)
scores[result[0]] = result[1]
if result[1] >= self.penalty_offest:
invalid_sampling += 1
else:
if not self.is_hybird:
res_combinations[result[0]] = result[2]
else:
pool = Pool(processes = cpu_count() - 4)
for result in pool.imap_unordered(self.evaluation_top_level, id_list):
scores[result[0]] = result[1]
if result[1] >= self.penalty_offest:
invalid_sampling += 1
else:
if not self.is_hybird:
res_combinations[result[0]] = result[2]
pool.close()
pool.join()
# if self.upper_level_mp:
# for i in id_list: # 25% slower
# result = self.evaluation_top_level(i)
# scores[result[0]] = result[1]
# if result[1] >= self.penalty_offest:
# invalid_sampling += 1
# else:
# if not self.is_hybird:
# res_combinations[result[0]] = result[2]
# else:
# pool = Pool(processes = cpu_count() - 4)
# for result in pool.imap_unordered(self.evaluation_top_level, id_list):
# scores[result[0]] = result[1]
# if result[1] >= self.penalty_offest:
# invalid_sampling += 1
# else:
# if not self.is_hybird:
# res_combinations[result[0]] = result[2]
# pool.close()
# pool.join()
for tup in id_list:
_, scores[tup[0]] = self.evaluation_top_level(tup)
if scores[tup[0]] >= self.penalty_offest:
invalid_sampling += 1
if not self.is_hybird:
best_in_iteration = min(scores)
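The evaluation hunk above replaces the Pool-based dispatch (kept commented out for reference) with a plain serial loop over the sampled candidates. Below is a minimal, self-contained sketch of that pattern; `evaluate_candidate` is a hypothetical stand-in for `evaluation_top_level` and is assumed to return `(candidate_index, score)`.

```python
# Hedged sketch of the serial evaluation loop above (not the repo's code).
PENALTY_OFFSET = 10_000_000_000

def evaluate_candidate(indexed_candidate):
    idx, candidate = indexed_candidate
    score = float(sum(candidate))      # placeholder objective
    return idx, score

def evaluate_population(id_list, popsize):
    scores = [0.0] * popsize
    invalid_sampling = 0
    for tup in id_list:                # tup = (index, candidate vector)
        idx, score = evaluate_candidate(tup)
        scores[idx] = score
        if score >= PENALTY_OFFSET:    # penalised (infeasible) sample
            invalid_sampling += 1
    return scores, invalid_sampling

# Example with two dummy candidates
print(evaluate_population([(0, [1, 2]), (1, [3, 4])], popsize=2))
```

For evaluations that are cheap relative to pickling and process start-up cost, a serial loop like this avoids the Pool overhead, which is presumably why the multiprocessing path is retired here.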
@@ -333,11 +340,11 @@ class cma_approach(object):
##print("RESULT NOT VALID")
##print("Layer:", self.best_layer, "sum: ", sum(self.best_layer))
#print(self.penalty_layer(self.best_layer))
with open(pc.RESULT_CSV_PATH+'cma.csv', 'a') as csvFile:
writer = csv.writer(csvFile, delimiter=',', lineterminator="\n")
writer.writerow([self.target_col,self.i,self.k, self.topology_file, 0,0, 0, 0, 0, 0, 0, layer, res, self.end-self.start, self.es.sigma, self.seed_od,self.valid_sampling_percentage, self.trial, self.population_size, self.max_res_unit, self.seeding_type])
csvFile.close
if self.print_to_csv:
with open(pc.RESULT_CSV_PATH+'657_cma.csv', 'a') as csvFile:
writer = csv.writer(csvFile, delimiter=',', lineterminator="\n")
writer.writerow([self.target_col,self.i,self.k, self.topology_file, 0,0, 0, 0, 0, 0, 0, layer, res, self.end-self.start, self.es.sigma, self.seed_od,self.valid_sampling_percentage, self.trial, self.population_size, self.max_res_unit, self.seeding_type])
csvFile.close
return False
@@ -374,16 +381,20 @@ class cma_approach(object):
# print("Throughtput Ratio:", (1/max_latency)/(1/full_latency[full_max_idx]))
# print("Latency increase:", (max_latency*self.k)/full_latency[full_max_idx])
with open(pc.RESULT_CSV_PATH+'cma.csv', 'a') as csvFile:
writer = csv.writer(csvFile, delimiter=',', lineterminator="\n")
writer.writerow([self.target_col,self.i,self.k, self.topology_file, 1,(1/max_latency), max_latency*self.k, 1/full_latency[full_max_idx], full_latency[full_max_idx], (1/max_latency)/(1/full_latency[full_max_idx]), (max_latency*self.k)/full_latency[full_max_idx], layer, res, self.end-self.start, self.es.sigma, self.seed_od,self.valid_sampling_percentage, self.trial, self.population_size, self.max_res_unit, self.seeding_type])
csvFile.close
if self.print_to_csv:
with open(pc.RESULT_CSV_PATH+'657_cma.csv', 'a') as csvFile:
writer = csv.writer(csvFile, delimiter=',', lineterminator="\n")
writer.writerow([self.target_col,self.i,self.k, self.topology_file, 1,(1/max_latency), max_latency*self.k, 1/full_latency[full_max_idx], full_latency[full_max_idx], (1/max_latency)/(1/full_latency[full_max_idx]), (max_latency*self.k)/full_latency[full_max_idx], layer, res, self.end-self.start, self.es.sigma, self.seed_od,self.valid_sampling_percentage, self.trial, self.population_size, self.max_res_unit, self.seeding_type])
csvFile.close
result = [self.target_col,self.i,self.k, self.topology_file, 1,(1/max_latency), max_latency*self.k, 1/full_latency[full_max_idx], full_latency[full_max_idx], (1/max_latency)/(1/full_latency[full_max_idx]), (max_latency*self.k)/full_latency[full_max_idx], layer, res, self.end-self.start, self.es.sigma, self.seed_od,self.valid_sampling_percentage, self.trial, self.population_size, self.max_res_unit, self.seeding_type]
return True
if __name__ == "__main__":
import csv
import sys
# python3 cma_approach.py googlenet 20 100 960 optimised DRAM_cycle
# python3 cma_approach.py googlenet 20 100 960 allzeros DRAM_cycle
topology = sys.argv[1]
k = int(sys.argv[2])
population_size = int(sys.argv[3])
@@ -403,7 +414,8 @@ if __name__ == "__main__":
res_step = 1,
penalty_offest = 100000000000,
seeding_type = seeding_type,
hybird = True
hybird = True,
print_to_csv = True
)
trials = 19
......
@@ -4,6 +4,7 @@ import sys
import random
import operator
import path_constant as pc
import numpy as np
from multiprocessing import Pool
from os import cpu_count
@@ -31,7 +32,8 @@ class ga_approach(object):
max_res_unit = 960, initial_res = 0,
res_step = 1,
penalty_offest = 100000000000
penalty_offest = 100000000000,
print_to_csv = True
):
self.target_col = target_col
@@ -58,13 +60,17 @@ class ga_approach(object):
self.total_valid_solution = 0
self.trial = 1
self.print_to_csv = print_to_csv
def seeding_generation(self):
og = list(range(len(self.layers) - 1))
# print(og)
pop = []
for _ in range(self.pop_size):
pop.append(swap_random(og))
for _ in range(np.random.randint(0, 50)):
og = swap_random(og)
pop.append(og)
return pop
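The new seeding walks the base ordering through a random number of swaps (0 to 49, via `np.random.randint(0, 50)`) before appending it, so later members drift progressively further from the identity permutation. A self-contained sketch of that idea follows; the `swap_random` helper here is written for illustration only (the repository's own helper is not shown in this diff) and is assumed to return a new, partially shuffled list.

```python
import random
import numpy as np

def swap_random(seq):
    # Illustrative stand-in for the repo's swap_random: swap two random
    # positions and return a new list (assumption, not the repo's code).
    seq = list(seq)
    i, j = random.sample(range(len(seq)), 2)
    seq[i], seq[j] = seq[j], seq[i]
    return seq

def seeding_generation(num_layers, pop_size):
    og = list(range(num_layers - 1))   # base ordering of cut-point indices
    pop = []
    for _ in range(pop_size):
        for _ in range(np.random.randint(0, 50)):
            og = swap_random(og)       # og accumulates swaps across members
        pop.append(og)
    return pop

print(seeding_generation(num_layers=6, pop_size=3))
```

Because `og` carries over between iterations, the drift is cumulative: members seeded later sit, on average, further from the identity ordering than earlier ones.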
@@ -97,7 +103,7 @@ class ga_approach(object):
self.data_src[elems[1]][int(elems[0])] = int(float(elems[target_idx]))
def decode_gene(self, gene):
useful_gene = gene[0:k-1]
useful_gene = gene[0:self.k-1]
useful_gene.sort()
solution_layer_domain = []
part = []
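The hunk above fixes decode_gene to slice with self.k (the instance's partition count) rather than a leftover global k. The rest of the function is collapsed in this diff, so the sketch below is an assumption about how k - 1 sorted cut points are typically turned into k contiguous layer partitions, not the repository's actual code.

```python
def decode_gene(gene, k, num_layers):
    # Assumed decoding: the first k - 1 gene entries act as cut points that
    # split layer indices 0..num_layers-1 into k contiguous partitions.
    cuts = sorted(gene[0:k - 1])
    partitions, start = [], 0
    for cut in cuts + [num_layers - 1]:
        partitions.append(list(range(start, cut + 1)))
        start = cut + 1
    return partitions

# Example: 6 layers, k = 3, cut points 1 and 3
print(decode_gene([3, 1, 4, 0, 2], k=3, num_layers=6))
# -> [[0, 1], [2, 3], [4, 5]]
```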
@@ -130,7 +136,7 @@ class ga_approach(object):
return latencies, max_latency_idx
def evaluate_hybird(self, gene):
def evaluate_hybrid(self, gene):
layer = self.decode_gene(gene)
res = [self.res_step] * self.k
latencies = []
@@ -145,13 +151,17 @@ class ga_approach(object):
# evaluate all possible population
# return [(score1, gene1), (score1, gene1), ...]
rankings = []
pool = Pool(processes = (cpu_count() - 4))
for result in pool.imap_unordered(self.evaluate_hybird, genes):
score = result[0]
gene = result[4]
# pool = Pool(processes = (cpu_count() - 4))
# for result in pool.imap_unordered(self.evaluate_hybrid, genes):
# score = result[0]
# gene = result[4]
# rankings.append((score, gene))
# pool.close()
# pool.join()
for g in genes:
score, _, _, _, gene = self.evaluate_hybrid(g)
rankings.append((score, gene))
pool.close()
pool.join()
rankings.sort(key = operator.itemgetter(0))
return rankings
@@ -159,52 +169,35 @@ class ga_approach(object):
def crossover(self, parents):
mom, dad = parents
if random.uniform(0, 1) < self.crossover_prob:
st = random.randint(0, self.k - 1)
end = random.randint(0, len(mom) - 1)
while end == st:
end = random.randint(0, len(mom) - 1)
if end < st:
st, end = end, st
# #print(st, end)
st = random.randint(0, self.k-1)
end = st + int(len(mom)/2)
child_m, child_d = [-1]*len(mom), [-1]*len(mom)
child_m[st:end] = mom[st:end]
child_d[st:end] = dad[st:end]
# #print(child_m, child_d)
c_idx = end
cursor = end
while True:
if dad[cursor] not in child_m:
child_m[c_idx] = dad[cursor]
c_idx += 1
cursor += 1
if cursor == len(mom):
cursor = 0
# print(child_m, child_d)
cursor = 0
for elem_d in dad:
if elem_d in child_m:
continue
if c_idx == len(mom):
c_idx = 0
while child_m[cursor] != -1:
cursor += 1
child_m[cursor] = elem_d
if -1 not in child_m:
break
c_idx = end
cursor = end
while True:
if mom[cursor] not in child_d:
child_d[c_idx] = mom[cursor]
c_idx += 1
cursor = 0
for elem_m in mom:
if elem_m in child_d:
continue
cursor += 1
if cursor == len(mom):
cursor = 0
while child_d[cursor] != -1:
cursor += 1
if c_idx == len(mom):
c_idx = 0
child_d[cursor] = elem_m
if -1 not in child_d:
break
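The rewritten crossover now copies a fixed half-length segment (starting at a random index below self.k) from each parent into the corresponding child, then fills the remaining slots left to right with the other parent's genes in their original order, skipping anything already present; essentially an order-crossover (OX1) style fill with a deterministic segment length. A standalone sketch for a single child, with illustrative names not taken from the repository:

```python
import random

def segment_and_fill(mom, dad, k):
    # Copy a half-length segment from mom, then fill the gaps with dad's
    # genes in order, skipping duplicates; mirrors the fill loop above.
    st = random.randint(0, k - 1)
    end = st + len(mom) // 2           # if end runs past the list, the copied
    child = [-1] * len(mom)            # segment is simply shorter
    child[st:end] = mom[st:end]
    cursor = 0
    for gene in dad:
        if gene in child:
            continue
        while child[cursor] != -1:     # next unfilled slot, left to right
            cursor += 1
        child[cursor] = gene
    return child

print(segment_and_fill(mom=[0, 1, 2, 3, 4, 5], dad=[5, 3, 1, 0, 2, 4], k=3))
```

Compared with the old version, the segment length no longer varies between two random cut points, which keeps the amount of material inherited from each parent constant across crossovers.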
@@ -235,10 +228,10 @@ class ga_approach(object):
print("!! Improved by", (last_best_latency - gen_best_score) /last_best_latency * 100)
last_best_latency = gen_best_score
non_improving_cnt = 0
self.mutation_prob_ad = self.mutation_prob_og
# self.mutation_prob_ad = self.mutation_prob_og
else:
non_improving_cnt += 1
self.mutation_prob_ad *= 0.1
self.mutation_prob_ad *= 0.95
done_eva = time.time()
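Two tuning changes land in this hunk: the adaptive mutation probability is no longer reset on an improving generation, and the decay on a non-improving one softens from ×0.1 to ×0.95, i.e. roughly a 5% shrink per stagnant generation instead of an immediate collapse. A quick worked check of the new schedule (0.3 is the mutation_prob set in the __main__ block further down):

```python
# How the adaptive mutation probability evolves under the new 0.95 decay.
p = 0.3                        # mutation_prob from the __main__ block
for _ in range(10):            # ten consecutive non-improving generations
    p *= 0.95
print(round(p, 4))             # ~0.1796, versus 0.3 * 0.1**10 under the old rule
```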
@@ -256,14 +249,23 @@ class ga_approach(object):
parent_list.append((elite_pop[i1], elite_pop[i2]))
# Generate Offspring
pool = Pool(processes = (cpu_count() - 4))
for result in pool.imap_unordered(self.crossover, parent_list):
child_a, child_b = result[0], result[1]
current_pop.append(self.mutation(child_a))
current_pop.append(self.mutation(child_b))
pool.close()
pool.join()
OffSpring = []
# pool = Pool(processes = (cpu_count() - 2))
# for result in pool.imap_unordered(self.crossover, parent_list):
# child_a, child_b = result[0], result[1]
# OffSpring.append(self.mutation(child_a))
# OffSpring.append(self.mutation(child_b))
# pool.close()
# pool.join()
for parent in parent_list:
child_a, child_b = self.crossover(parent)
OffSpring.append(self.mutation(child_a))
OffSpring.append(self.mutation(child_b))
current_pop.extend(OffSpring)
# print(self.best_gene)
self.end = time.time()
done_generate_offpsring = time.time()
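Offspring generation follows the same pattern as the CMA evaluation change: the Pool dispatch is kept commented out and replaced by a serial loop over parent pairs, with each pair producing two mutated children. A compact sketch of that loop, using hypothetical crossover/mutation callables rather than the class methods:

```python
def make_offspring(parent_list, crossover, mutation):
    # Serial counterpart of the commented-out Pool.imap_unordered path:
    # every parent pair yields two children, each passed through mutation.
    offspring = []
    for pair in parent_list:
        child_a, child_b = crossover(pair)
        offspring.append(mutation(child_a))
        offspring.append(mutation(child_b))
    return offspring

# Dummy example: pass-through crossover and identity mutation
pairs = [([0, 1, 2], [2, 1, 0])]
print(make_offspring(pairs,
                     crossover=lambda p: (list(p[0]), list(p[1])),
                     mutation=lambda c: c))
```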
@@ -277,7 +279,7 @@ class ga_approach(object):
def report(self):
max_latency, latencies, res, layer, _ = \
self.evaluate_hybird(self.best_gene)
self.evaluate_hybrid(self.best_gene)
# generate data for mapping the full array
full_latency, full_max_idx = \
@@ -298,18 +300,28 @@ class ga_approach(object):
# print("Throughtput Ratio:", (1/max_latency)/(1/full_latency[full_max_idx]))
# print("Latency increase:", (max_latency*self.k)/full_latency[full_max_idx])
# PLEASE UNCOMMENT THIS PART IF YOU ARE NOT USING THE BASH SCRIPT WE HAVE PROVIDED
with open(pc.RESULT_CSV_PATH+'ga.csv', 'a') as csvFile:
writer = csv.writer(csvFile, delimiter=',', lineterminator="\n")
writer.writerow([self.target_col, self.gen,self.k,self.topology_file, 1,
(1/max_latency), max_latency*self.k,
1/full_latency[full_max_idx], full_latency[full_max_idx],
(1/max_latency)/(1/full_latency[full_max_idx]),
(max_latency*self.k)/full_latency[full_max_idx],
layer, res,
self.end-self.start, 0, 0, 100,
1,self.pop_size,self.max_res_unit,"ga"])
csvFile.close
if self.print_to_csv:
with open(pc.RESULT_CSV_PATH+'657_ga.csv', 'a') as csvFile:
writer = csv.writer(csvFile, delimiter=',', lineterminator="\n")
writer.writerow([self.target_col, self.gen,self.k,self.topology_file, 1,
(1/max_latency), max_latency*self.k,
1/full_latency[full_max_idx], full_latency[full_max_idx],
(1/max_latency)/(1/full_latency[full_max_idx]),
(max_latency*self.k)/full_latency[full_max_idx],
layer, res,
self.end-self.start, 0, 0, 100,
1,self.pop_size,self.max_res_unit,"ga"])
csvFile.close
result = [self.target_col, self.gen,self.k,self.topology_file, 1,
(1/max_latency), max_latency*self.k,
1/full_latency[full_max_idx], full_latency[full_max_idx],
(1/max_latency)/(1/full_latency[full_max_idx]),
(max_latency*self.k)/full_latency[full_max_idx],
layer, res,
self.end-self.start, 0, 0, 100,
1,self.pop_size,self.max_res_unit,"ga"]
return True, result
if __name__ == "__main__":
# python3 ga_approach.py googlenet 20 10 100 960 DRAM_cycle
@@ -328,8 +340,8 @@ if __name__ == "__main__":
number_of_partition = k, max_generation = 10000,
population_size = population_size,
elite_population = 10,
crossover_prob = 0.75,
mutation_prob = 0.7,
crossover_prob = 0.8,
mutation_prob = 0.3,
max_res_unit = 960, initial_res = 0,
res_step = 1,
......