Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
Open sidebar
watcag-public
fpga-syspart
Commits
2ac18b99
Commit
2ac18b99
authored
Jun 30, 2020
by
harry1080ti
Browse files
run everything except squeezenet
parent
bb6df83d
Changes
5
Hide whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
48 additions
and
482 deletions
+48
-482
optimization_algo/approaches/cma_approach.py
optimization_algo/approaches/cma_approach.py
+25
-13
optimization_algo/approaches/cma_approach_logmore.py
optimization_algo/approaches/cma_approach_logmore.py
+0
-384
optimization_algo/approaches/cma_approach_square_size.py
optimization_algo/approaches/cma_approach_square_size.py
+19
-81
optimization_algo/resulting_csv/cma_logmore_sq.csv
optimization_algo/resulting_csv/cma_logmore_sq.csv
+1
-1
optimization_algo/scripts/sweep_nets_cma_sq.sh
optimization_algo/scripts/sweep_nets_cma_sq.sh
+3
-3
No files found.
optimization_algo/approaches/cma_approach.py
View file @
2ac18b99
...
...
@@ -47,6 +47,8 @@ class cma_approach(object):
self
.
trial
=
1
self
.
seeding_type
=
seeding_type
self
.
upper_level_mp
=
False
def
parse_topology_file
(
self
):
layers
=
[]
with
open
(
pc
.
TOPOLOGIES_PATH
+
self
.
topology_file
,
'r'
)
as
f
:
...
...
@@ -273,25 +275,35 @@ class cma_approach(object):
scores
=
[
0
]
*
self
.
es
.
popsize
invalid_sampling
=
0
res_combintaions
=
[
0
]
*
self
.
es
.
popsize
pool
=
Pool
(
processes
=
cpu_count
()
-
4
)
for
result
in
pool
.
imap_unordered
(
self
.
evaluation_top_level
,
id_list
):
scores
[
result
[
0
]]
=
result
[
1
]
if
result
[
1
]
>=
self
.
penalty_offest
:
invalid_sampling
+=
1
else
:
if
not
self
.
is_hybird
:
res_combintaions
[
result
[
0
]]
=
result
[
2
]
pool
.
close
()
pool
.
join
()
res_combinations
=
[
0
]
*
self
.
es
.
popsize
if
self
.
upper_level_mp
:
for
i
in
id_list
:
# 25% slower
result
=
self
.
evaluation_top_level
(
i
)
scores
[
result
[
0
]]
=
result
[
1
]
if
result
[
1
]
>=
self
.
penalty_offest
:
invalid_sampling
+=
1
else
:
if
not
self
.
is_hybird
:
res_combinations
[
result
[
0
]]
=
result
[
2
]
else
:
pool
=
Pool
(
processes
=
cpu_count
()
-
4
)
for
result
in
pool
.
imap_unordered
(
self
.
evaluation_top_level
,
id_list
):
scores
[
result
[
0
]]
=
result
[
1
]
if
result
[
1
]
>=
self
.
penalty_offest
:
invalid_sampling
+=
1
else
:
if
not
self
.
is_hybird
:
res_combinations
[
result
[
0
]]
=
result
[
2
]
pool
.
close
()
pool
.
join
()
if
not
self
.
is_hybird
:
best_in_iteration
=
min
(
scores
)
if
best_in_iteration
<
best_overall
and
best_in_iteration
<
self
.
penalty_offest
:
best_overall
=
best_in_iteration
self
.
best_res
=
res_combin
t
aions
[
scores
.
index
(
min
(
scores
))]
self
.
best_res
=
res_combina
t
ions
[
scores
.
index
(
min
(
scores
))]
##print(str(self.i) + ":", \
# "Sigma:",round(self.es.sigma, 4), \
...
...
optimization_algo/approaches/cma_approach_logmore.py
deleted
100644 → 0
View file @
bb6df83d
import
cma
from
multiprocessing
import
Pool
from
os
import
cpu_count
import
time
class cma_approach(object):
    """CMA-ES search for partitioning a CNN topology across an accelerator.

    The genome encodes how many consecutive layers fall into each of the
    ``k`` partitions; resources are then either allocated greedily
    ("hybird" mode, sic) or optimised by an inner CMA-ES run.
    """

    def __init__(self,
                 # data path
                 path_to_datasrc="alexnet_data.csv",   # per-layer measurement CSV
                 path_to_topology="alexnet.csv",       # ordered layer list
                 target_col="Cycles",                  # metric column to optimise
                 # problem definition
                 number_of_partition=4,
                 max_iteration=100,
                 sigma=0.5,                            # initial CMA-ES step size
                 population_size=10,
                 # constraint
                 max_res_unit=960,                     # total resource budget
                 initial_res=0,                        # NOTE(review): accepted but never used
                 res_step=1,                           # resource allocation granularity
                 penalty_offest=10000000000,           # base score for invalid samples
                 seeding_type="optimised",             # "allzeros" | "optimised"
                 hybird=True):                         # greedy inner allocation when True
        self.target_col = target_col
        self.start = time.time()  # wall-clock start; self.end is set per iteration in run()
        self.k = number_of_partition
        self.max_iter = max_iteration
        self.sigma = sigma
        self.max_res_unit = max_res_unit
        self.res_step = res_step
        self.population_size = population_size
        self.penalty_offest = penalty_offest
        self.ending_iter = 0
        self.is_hybird = hybird
        # layer name -> {resource units -> measured latency}
        self.data_src = {}
        self.topology_file = path_to_topology
        self.layers = self.parse_topology_file()
        self.parse_data_set_file(path_to_datasrc)
        self.best_layer = number_of_partition * [0]
        self.best_res = number_of_partition * [0]
        self.total_valid_solution = 0
        self.trial = 1
        self.seeding_type = seeding_type
def
parse_topology_file
(
self
):
layers
=
[]
with
open
(
self
.
topology_file
,
'r'
)
as
f
:
next
(
f
)
for
line
in
f
:
elems
=
line
.
strip
().
split
(
','
)
layers
.
append
(
elems
[
0
])
for
layer
in
layers
:
self
.
data_src
[
layer
]
=
{}
return
layers
def
parse_data_set_file
(
self
,
path_to_data_csv
):
first
=
True
target_idx
=
2
with
open
(
path_to_data_csv
,
'r'
)
as
f
:
for
line
in
f
:
elems
=
line
.
strip
().
split
(
','
)
# print(elems)
if
first
:
for
idx
,
col
in
enumerate
(
elems
):
if
self
.
target_col
in
col
:
target_idx
=
idx
break
first
=
False
else
:
self
.
data_src
[
elems
[
1
]][
int
(
elems
[
0
])]
=
int
(
float
(
elems
[
target_idx
]))
def
regroup_layers
(
self
,
sample
):
# #print("DEBUG", sample)
detail_sample
=
[]
idx
=
0
for
size
in
sample
:
part
=
[]
if
size
==
1
:
part
.
append
(
self
.
layers
[
idx
])
idx
+=
1
else
:
for
i
in
range
(
0
,
size
):
part
.
append
(
self
.
layers
[
i
+
idx
])
idx
+=
size
detail_sample
.
append
(
part
)
return
detail_sample
def
decode
(
self
,
val
,
max_val
):
return
int
(
val
*
max_val
)
def
encode
(
self
,
val
,
max_val
):
return
float
(
val
/
max_val
)
def
filter_layer
(
self
,
layer
):
for
idx
in
range
(
self
.
k
):
if
layer
[
idx
]
<=
0
:
return
False
if
sum
(
layer
)
!=
len
(
self
.
layers
):
return
False
return
True
def
filter_res
(
self
,
res
):
# #print(layer, res)
for
idx
in
range
(
self
.
k
):
if
res
[
idx
]
<=
0
:
return
False
if
sum
(
res
)
!=
self
.
max_res_unit
:
return
False
return
True
def
penalty_layer
(
self
,
layer
):
penalty_score
=
self
.
penalty_offest
if
sum
(
layer
)
!=
len
(
self
.
layers
):
penalty_score
+=
self
.
penalty_offest
else
:
layer
=
[
abs
(
val
)
for
val
in
layer
]
for
idx
in
range
(
self
.
k
):
if
layer
[
idx
]
<=
0
:
penalty_score
*=
1.05
percent_diff
=
(
abs
(
sum
(
layer
)
-
len
(
self
.
layers
))
/
len
(
self
.
layers
))
penalty_score
+=
percent_diff
*
self
.
penalty_offest
return
penalty_score
def
penalty_res
(
self
,
res
):
penalty_score
=
self
.
penalty_offest
if
sum
(
res
)
!=
self
.
max_res_unit
:
penalty_score
+=
self
.
penalty_offest
else
:
res
=
[
abs
(
val
)
for
val
in
res
]
for
idx
in
range
(
self
.
k
):
if
res
[
idx
]
<=
0
:
penalty_score
*=
1.05
percent_diff
=
abs
(
sum
(
res
)
-
self
.
max_res_unit
)
/
self
.
max_res_unit
penalty_score
+=
percent_diff
*
self
.
penalty_offest
return
penalty_score
def
find_max_latency
(
self
,
layer_partition
,
res_partitions
):
latencies
=
[
0
]
*
len
(
layer_partition
)
max_latency_idx
=
0
for
idx
,
part
in
enumerate
(
layer_partition
):
res
=
res_partitions
[
idx
]
for
layer
in
part
:
latencies
[
idx
]
+=
self
.
data_src
[
layer
][
res
]
if
latencies
[
idx
]
>
latencies
[
max_latency_idx
]:
max_latency_idx
=
idx
return
latencies
,
max_latency_idx
def
evaluate_hybird
(
self
,
layer
):
res
=
[
self
.
res_step
]
*
self
.
k
latencies
=
[]
for
i
in
range
(
0
,
int
(
self
.
max_res_unit
/
self
.
res_step
-
self
.
k
*
self
.
res_step
)):
latencies
,
max_idx
=
self
.
find_max_latency
(
layer
,
res
)
res
[
max_idx
]
+=
self
.
res_step
return
latencies
[
max_idx
],
latencies
,
res
,
layer
# not really in used
def
evaluate_full_relaxed
(
self
,
layer
):
seed
=
[]
for
i
in
range
(
self
.
k
-
1
):
seed
.
append
(
int
(
self
.
max_res_unit
/
self
.
k
))
seed
.
append
(
self
.
max_res_unit
-
sum
(
seed
))
# #print(seed)
seed
=
[
self
.
encode
(
val
,
self
.
max_res_unit
)
for
val
in
seed
[:
-
1
]]
es_res
=
cma
.
CMAEvolutionStrategy
(
seed
,
\
self
.
sigma
,
{
'popsize'
:
self
.
population_size
})
i
=
0
while
not
es_res
.
stop
()
and
i
<
self
.
max_iter
:
samples
=
es_res
.
ask
()
scores
=
[
0
]
*
es_res
.
popsize
res
=
[
0
]
*
es_res
.
popsize
for
idx
,
sample
in
enumerate
(
samples
):
res_assign
=
[
self
.
decode
(
val
,
self
.
max_res_unit
)
for
val
in
sample
]
res_assign
.
append
(
self
.
max_res_unit
-
sum
(
res_assign
))
res
[
idx
]
=
res_assign
for
idx
,
r
in
enumerate
(
res
):
if
self
.
filter_res
(
r
):
latencies
,
max_idx
=
self
.
find_max_latency
(
layer
,
r
)
scores
[
idx
]
=
latencies
[
max_idx
]
else
:
scores
[
idx
]
=
self
.
penalty_res
(
r
)
# for idx in range(self.population_size):
# #print(samples[idx], scores[idx])
es_res
.
tell
(
samples
,
scores
)
i
+=
1
res
=
[
self
.
decode
(
val
,
self
.
max_res_unit
)
for
val
in
es_res
.
result
[
0
]]
res
.
append
(
self
.
max_res_unit
-
sum
(
res
))
if
self
.
filter_res
(
r
):
latencies
,
max_idx
=
self
.
find_max_latency
(
layer
,
res
)
else
:
max_latency
=
self
.
penalty_res
(
r
)
latencies
=
[
max_latency
]
*
self
.
k
max_idx
=
0
return
latencies
[
max_idx
],
latencies
,
res
,
layer
def
evaluation_top_level
(
self
,
in_val
):
pid
,
sampling
=
in_val
layer
=
[
self
.
decode
(
val
,
len
(
self
.
layers
))
for
val
in
sampling
]
layer
.
append
(
len
(
self
.
layers
)
-
sum
(
layer
))
penalty
=
0
if
not
self
.
filter_layer
(
layer
):
penalty
=
self
.
penalty_layer
(
layer
)
if
self
.
is_hybird
:
return
pid
,
penalty
else
:
return
pid
,
penalty
*
4
layer
=
self
.
regroup_layers
(
layer
)
if
self
.
is_hybird
:
return
pid
,
self
.
evaluate_hybird
(
layer
)[
0
]
else
:
score
,
_
,
res
,
_
=
self
.
evaluate_full_relaxed
(
layer
)
return
pid
,
score
,
res
    def run(self):
        """Drive the outer CMA-ES search over layer-partition sizes.

        Seeds the strategy according to ``self.seeding_type``, then
        repeats ask -> evaluate population -> tell until the strategy
        converges or ``max_iter`` iterations pass, logging each iteration
        through ``report``.

        Raises:
            ValueError: for an unknown ``seeding_type``.
        """
        # NOTE(review): doubles each call (1, 2, 4, ...); looks like it was
        # meant to count runs — confirm before relying on self.trial.
        self.trial += self.trial
        if (self.seeding_type == "allzeros"):
            # Start the search from the origin of the encoded space.
            self.seed = [0] * (self.k - 1)
            self.seed_od = self.seed
        elif (self.seeding_type == "optimised"):
            # Seed with an even split of the layers over the k partitions.
            self.seed = []
            for i in range(self.k - 1):
                self.seed.append(int(len(self.layers) / self.k))
            self.seed.append(len(self.layers) - sum(self.seed))
            self.seed_od = self.seed
            # Only k-1 genes are free; the last partition size is implied.
            self.seed = [self.encode(val, len(self.layers)) for val in self.seed[:-1]]
        else:
            raise ValueError('Invalid Seeding Strategy')
        self.es = cma.CMAEvolutionStrategy(self.seed, self.sigma, \
            {'popsize' : self.population_size})
        best_overall = self.penalty_offest
        self.i = 0
        while not self.es.stop() and self.i < self.max_iter:
            samples = self.es.ask()
            id_list = [(idx, sample) for idx, sample in enumerate(samples)]
            scores = [0] * self.es.popsize
            invalid_sampling = 0
            res_combintaions = [0] * self.es.popsize
            pool = Pool(processes = 1) #cpu_count() - 4)
            for result in pool.imap_unordered(self.evaluation_top_level, id_list):
                # result = (population index, score[, resource combination]).
                scores[result[0]] = result[1]
                if result[1] >= self.penalty_offest:
                    invalid_sampling += 1
                else:
                    if not self.is_hybird:
                        res_combintaions[result[0]] = result[2]
            pool.close()
            pool.join()
            if not self.is_hybird:
                # Track the best feasible resource combination seen so far.
                best_in_iteration = min(scores)
                if best_in_iteration < best_overall and best_in_iteration < self.penalty_offest:
                    best_overall = best_in_iteration
                    self.best_res = res_combintaions[scores.index(min(scores))]
            ##print(str(self.i) + ":", \
            #    "Sigma:",round(self.es.sigma, 4), \
            #    "|| Valid sampling percentage:", \
            #    (self.population_size - invalid_sampling) /self.population_size *100)
            ##print("invalid sampling", invalid_sampling)
            self.valid_sampling_percentage = (self.population_size - invalid_sampling) / self.population_size * 100
            self.total_valid_solution += self.population_size - invalid_sampling
            self.samples = samples
            self.scores = scores
            self.es.tell(samples, scores)
            self.end = time.time()
            # Decode the strategy's current best into partition sizes.
            self.best_layer = [self.decode(val, len(self.layers)) for val in self.es.result[0]]
            self.best_layer.append(len(self.layers) - sum(self.best_layer))
            self.report()
            self.i += 1
        self.ending_iter = self.i
def
report
(
self
):
##print(self.i, self.es.sigma)
max_latency
=
0
layer
=
[]
res
=
[]
latencies
=
[]
full_latency
,
full_max_idx
=
self
.
find_max_latency
([
self
.
layers
],
[
self
.
max_res_unit
]
*
len
(
self
.
layers
))
if
self
.
is_hybird
:
if
not
self
.
filter_layer
(
self
.
best_layer
):
with
open
(
'cma_logmore.csv'
,
'a'
)
as
csvFile
:
writer
=
csv
.
writer
(
csvFile
,
delimiter
=
','
,
lineterminator
=
"
\n
"
)
for
i
in
range
(
self
.
population_size
):
writer
.
writerow
([
self
.
target_col
,
self
.
i
,
self
.
k
,
self
.
topology_file
,
self
.
seeding_type
,
i
,
self
.
k
*
self
.
scores
[
i
],
1
/
self
.
scores
[
i
],
(
self
.
k
*
self
.
scores
[
i
])
/
(
full_latency
[
full_max_idx
]),(
1
/
self
.
scores
[
i
])
/
(
1
/
full_latency
[
full_max_idx
])])
csvFile
.
close
return
False
layer
=
self
.
regroup_layers
(
self
.
best_layer
)
max_latency
,
latencies
,
res
,
layers
=
self
.
evaluate_hybird
(
layer
)
else
:
if
not
self
.
filter_res
(
self
.
best_res
)
and
not
self
.
filter_layer
(
self
.
best_layer
):
return
False
layer
=
self
.
regroup_layers
(
self
.
best_layer
)
res
=
self
.
best_res
latencies
,
max_idx
=
self
.
find_max_latency
(
layer
,
self
.
best_res
)
max_latency
=
latencies
[
max_idx
]
# generate data for mapping the full array
with
open
(
'cma_logmore.csv'
,
'a'
)
as
csvFile
:
writer
=
csv
.
writer
(
csvFile
,
delimiter
=
','
,
lineterminator
=
"
\n
"
)
for
i
in
range
(
self
.
population_size
):
writer
.
writerow
([
self
.
target_col
,
self
.
i
,
self
.
k
,
self
.
topology_file
,
self
.
seeding_type
,
i
,
self
.
k
*
self
.
scores
[
i
],
1
/
self
.
scores
[
i
],
(
self
.
k
*
self
.
scores
[
i
])
/
(
full_latency
[
full_max_idx
]),(
1
/
self
.
scores
[
i
])
/
(
1
/
full_latency
[
full_max_idx
])])
csvFile
.
close
return
True
if __name__ == "__main__":
    import csv
    import sys

    # CLI: topology k population_size max_res_unit seeding_type target_col
    topology = sys.argv[1]
    k = int(sys.argv[2])
    population_size = int(sys.argv[3])
    max_res_unit = int(sys.argv[4])
    seeding_type = sys.argv[5]
    target_col = sys.argv[6]

    es_hybird = cma_approach(
        path_to_datasrc = str(topology) + "_mem_bound.csv",
        path_to_topology = str(topology) + ".csv",
        target_col = str(target_col),
        number_of_partition = k,
        max_iteration = 10000,
        sigma = 0.5,
        population_size = population_size,
        max_res_unit = max_res_unit,
        initial_res = 0,
        res_step = 1,
        penalty_offest = 100000000000,
        seeding_type = seeding_type,
        hybird = True)

    trials = 1
    #print("======== HYBRID ======== ( k:", k, "trials:", trials, ")")
    es_hybird.run()
    # Re-run until report() accepts a feasible solution or 20 trials pass.
    while not es_hybird.report() and trials < 20:
        #print("======== HYBRID ======== ( k:", k, "trials:", trials, ")")
        es_hybird.run()
        trials += 1
        # NOTE(review): incrementing k here does not affect es_hybird,
        # which was constructed with the original k — confirm intent.
        k += 1
    #print("convergence takes", trials, "trials")
optimization_algo/approaches/cma_approach_square_size.py
View file @
2ac18b99
...
...
@@ -139,23 +139,6 @@ class cma_approach(object):
return
penalty_score
# def penalty_res(self, res):
# penalty_score = self.penalty_offest
# if sum(res) != self.max_res_unit:
# penalty_score += self.penalty_offest
# else:
# res = [abs(val) for val in res]
# for idx in range(self.k):
# if res[idx] <= 0:
# penalty_score *= 1.05
# percent_diff = abs(sum(res) - self.max_res_unit) / self.max_res_unit
# penalty_score += percent_diff * self.penalty_offest
# return penalty_score
def
find_max_latency
(
self
,
layer_partition
,
res_partitions
):
latencies
=
[
0
]
*
len
(
layer_partition
)
max_latency_idx
=
0
...
...
@@ -184,54 +167,6 @@ class cma_approach(object):
return
latencies
[
max_idx
],
latencies
,
res
,
layer
# not really in used
# def evaluate_full_relaxed(self, layer):
# seed = []
# for i in range(self.k - 1):
# seed.append(int(self.max_res_unit/self.k))
# seed.append(self.max_res_unit - sum(seed))
# # #print(seed)
# seed = [self.encode(val, self.max_res_unit) for val in seed[:-1]]
# es_res = cma.CMAEvolutionStrategy(seed, \
# self.sigma, {'popsize' : self.population_size})
# i = 0
# while not es_res.stop() and i < self.max_iter:
# samples = es_res.ask()
# scores = [0] * es_res.popsize
# res = [0] * es_res.popsize
# for idx, sample in enumerate(samples):
# res_assign = [self.decode(val, self.max_res_unit) for val in sample]
# res_assign.append(self.max_res_unit - sum(res_assign))
# res[idx] = res_assign
# for idx, r in enumerate(res):
# if self.filter_res(r):
# latencies, max_idx = self.find_max_latency(layer, r)
# scores[idx] = latencies[max_idx]
# else:
# scores[idx] = self.penalty_res(r)
# # for idx in range(self.population_size):
# # #print(samples[idx], scores[idx])
# es_res.tell(samples, scores)
# i += 1
# res = [self.decode(val, self.max_res_unit) for val in es_res.result[0]]
# res.append(self.max_res_unit - sum(res))
# if self.filter_res(r):
# latencies, max_idx = self.find_max_latency(layer, res)
# else:
# max_latency = self.penalty_res(r)
# latencies = [max_latency]*self.k
# max_idx = 0
# return latencies[max_idx], latencies, res, layer
def
evaluation_top_level
(
self
,
in_val
):
pid
,
sampling
=
in_val
layer
=
[
self
.
decode
(
val
,
len
(
self
.
layers
))
for
val
in
sampling
]
...
...
@@ -295,6 +230,11 @@ class cma_approach(object):
pool
.
close
()
pool
.
join
()
# for tup in samples:
# _, scores[tup[0]] = evaluation_top_level(tup)
# if scores[tup[0]] >= self.penalty_offest:
# invalid_sampling += 1
if
not
self
.
is_hybird
:
best_in_iteration
=
min
(
scores
)
...
...
@@ -359,20 +299,20 @@ class cma_approach(object):
full_latency
,
full_max_idx
=
self
.
find_max_latency
([
self
.
layers
],
[
129
]
*
len
(
self
.
layers
))
# PLEASE UNCOMMENT OUT THIS PART IF YOU NOT USING THE BASH SCRIPT WE HAVE PROVIDED
print
(
"================================= RESULT ================================="
)
print
(
"Solution: (out of"
,
self
.
total_valid_solution
,
"solutions)"
)
print
(
layer
)
print
(
"Res mapping:"
)
print
(
res
)
print
(
"Latency for each partition: "
)
print
(
latencies
)
print
(
"Final Latency:"
,
max_latency
*
self
.
k
,
"|| Throught put:"
,
1
/
max_latency
)
print
(
"=========================================================================="
)
print
(
"Map to full array ("
,
self
.
max_res_unit
,
")"
)
print
(
"Final Latency:"
,
full_latency
[
full_max_idx
],
"|| Throught put:"
,
1
/
full_latency
[
full_max_idx
])
print
(
"=========================================================================="
)
print
(
"Throughtput Ratio:"
,
(
1
/
max_latency
)
/
(
1
/
full_latency
[
full_max_idx
]))
print
(
"Latency increase:"
,
(
max_latency
*
self
.
k
)
/
full_latency
[
full_max_idx
])
#
print("================================= RESULT =================================")
#
print("Solution: (out of", self.total_valid_solution, "solutions)")
#
print(layer)
#
print("Res mapping:")
#
print(res)
#
print("Latency for each partition: ")
#
print(latencies)
#
print("Final Latency:", max_latency*self.k, "|| Throught put:", 1/max_latency)
#
print("==========================================================================")
#
print("Map to full array (", self.max_res_unit, ")")
#
print("Final Latency:", full_latency[full_max_idx], "|| Throught put:", 1/full_latency[full_max_idx])
#
print("==========================================================================")
#
print("Throughtput Ratio:", (1/max_latency)/(1/full_latency[full_max_idx]))
#
print("Latency increase:", (max_latency*self.k)/full_latency[full_max_idx])
with
open
(
pc
.
RESULT_CSV_PATH
+
'cma_logmore_sq.csv'
,
'a'
)
as
csvFile
:
writer
=
csv
.
writer
(
csvFile
,
delimiter
=
','
,
lineterminator
=
"
\n
"
)
...
...
@@ -407,10 +347,8 @@ if __name__ == "__main__":
)
trials
=
1
#print("======== HYBRID ======== ( k:", k, "trials:", trials, ")")
es_hybird
.
run
()
while
not
es_hybird
.
report
()
and
trials
<
20
:
#print("======== HYBRID ======== ( k:", k, "trials:", trials, ")")
es_hybird
.
run
()
trials
+=
1
...
...
optimization_algo/resulting_csv/cma_logmore_sq.csv
View file @
2ac18b99
...
...
@@ -27551,4 +27551,4 @@ Cycles,161,29,googlenet.csv,1,1.9360334546580963e-05,1497908,3.044825927301736e-
Cycles,162,29,googlenet.csv,1,1.9360334546580963e-05,1497908,3.044825927301736e-06,328426,6.358437233795399,4.560869115112689,"[['Conv1', 'Conv2red'], ['Conv2', 'Inc3a_1x1', 'Inc3a_3x3red'], ['Inc3a_3x3'], ['Inc3a_5x5red', 'Inc3a_5x5', 'Inc3a_pp'], ['Inc3b_1x1', 'Inc3b_3x3red'], ['Inc3b_3x3', 'Inc3b_5x5red'], ['Inc3b_5x5', 'Inc3b_pp', 'Inc4a_1x1'], ['Inc4a_3x3red', 'Inc4a_3x3'], ['Inc4a_5x5red', 'Inc4a_5x5', 'Inc4a_pp'], ['Inc4b_1x1', 'Inc4b_3x3red'], ['Inc4b_3x3', 'Inc4b_5x5red', 'Inc4b_5x5'], ['Inc4b_pp', 'Inc4c_1x1'], ['Inc4c_3x3red', 'Inc4c_3x3'], ['Inc4c_5x5red', 'Inc4c_5x5'], ['Inc4c_pp', 'Inc4d_1x1', 'Inc4d_3x3red'], ['Inc4d_3x3', 'Inc4d_5x5red'], ['Inc4d_5x5', 'Inc4d_pp'], ['Inc4e_1x1', 'Inc4e_3x3red'], ['Inc4e_3x3'], ['Inc4e_5x5red', 'Inc4e_5x5'], ['Inc4e_pp'], ['Inc5a_1x1', 'Inc5a_3x3red'], ['Inc5a_3x3', 'Inc5a_5x5red'], ['Inc5a_5x5', 'Inc5a_pp'], ['Inc5b_1x1'], ['Inc5b_3x3red'], ['Inc5b_3x3', 'Inc5b_5x5red'], ['Inc5b_5x5', 'Inc5b_pp'], ['FC6']]","[66, 96, 45, 21, 39, 66, 48, 36, 18, 30, 39, 27, 51, 9, 33, 48, 21, 39, 54, 24, 21, 30, 36, 21, 30, 21, 51, 21, 63]",749.1111011505127,0.006118429722042403,"[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]",84.0,2,100,51840,optimised
Cycles,163,29,googlenet.csv,1,1.9360334546580963e-05,1497908,3.044825927301736e-06,328426,6.358437233795399,4.560869115112689,"[['Conv1', 'Conv2red'], ['Conv2', 'Inc3a_1x1', 'Inc3a_3x3red'], ['Inc3a_3x3'], ['Inc3a_5x5red', 'Inc3a_5x5', 'Inc3a_pp'], ['Inc3b_1x1', 'Inc3b_3x3red'], ['Inc3b_3x3', 'Inc3b_5x5red'], ['Inc3b_5x5', 'Inc3b_pp', 'Inc4a_1x1'], ['Inc4a_3x3red', 'Inc4a_3x3'], ['Inc4a_5x5red', 'Inc4a_5x5', 'Inc4a_pp'], ['Inc4b_1x1', 'Inc4b_3x3red'], ['Inc4b_3x3', 'Inc4b_5x5red', 'Inc4b_5x5'], ['Inc4b_pp', 'Inc4c_1x1'], ['Inc4c_3x3red', 'Inc4c_3x3'], ['Inc4c_5x5red', 'Inc4c_5x5'], ['Inc4c_pp', 'Inc4d_1x1', 'Inc4d_3x3red'], ['Inc4d_3x3', 'Inc4d_5x5red'], ['Inc4d_5x5', 'Inc4d_pp'], ['Inc4e_1x1', 'Inc4e_3x3red'], ['Inc4e_3x3'], ['Inc4e_5x5red', 'Inc4e_5x5'], ['Inc4e_pp'], ['Inc5a_1x1', 'Inc5a_3x3red'], ['Inc5a_3x3', 'Inc5a_5x5red'], ['Inc5a_5x5', 'Inc5a_pp'], ['Inc5b_1x1'], ['Inc5b_3x3red'], ['Inc5b_3x3', 'Inc5b_5x5red'], ['Inc5b_5x5', 'Inc5b_pp'], ['FC6']]","[66, 96, 45, 21, 39, 66, 48, 36, 18, 30, 39, 27, 51, 9, 33, 48, 21, 39, 54, 24, 21, 30, 36, 21, 30, 21, 51, 21, 63]",752.5722858905792,0.005706447216186998,"[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]",85.0,2,100,51840,optimised
Cycles,164,29,googlenet.csv,1,1.9360334546580963e-05,1497908,3.044825927301736e-06,328426,6.358437233795399,4.560869115112689,"[['Conv1', 'Conv2red'], ['Conv2', 'Inc3a_1x1', 'Inc3a_3x3red'], ['Inc3a_3x3'], ['Inc3a_5x5red', 'Inc3a_5x5', 'Inc3a_pp'], ['Inc3b_1x1', 'Inc3b_3x3red'], ['Inc3b_3x3', 'Inc3b_5x5red'], ['Inc3b_5x5', 'Inc3b_pp', 'Inc4a_1x1'], ['Inc4a_3x3red', 'Inc4a_3x3'], ['Inc4a_5x5red', 'Inc4a_5x5', 'Inc4a_pp'], ['Inc4b_1x1', 'Inc4b_3x3red'], ['Inc4b_3x3', 'Inc4b_5x5red', 'Inc4b_5x5'], ['Inc4b_pp', 'Inc4c_1x1'], ['Inc4c_3x3red', 'Inc4c_3x3'], ['Inc4c_5x5red', 'Inc4c_5x5'], ['Inc4c_pp', 'Inc4d_1x1', 'Inc4d_3x3red'], ['Inc4d_3x3', 'Inc4d_5x5red'], ['Inc4d_5x5', 'Inc4d_pp'], ['Inc4e_1x1', 'Inc4e_3x3red'], ['Inc4e_3x3'], ['Inc4e_5x5red', 'Inc4e_5x5'], ['Inc4e_pp'], ['Inc5a_1x1', 'Inc5a_3x3red'], ['Inc5a_3x3', 'Inc5a_5x5red'], ['Inc5a_5x5', 'Inc5a_pp'], ['Inc5b_1x1'], ['Inc5b_3x3red'], ['Inc5b_3x3', 'Inc5b_5x5red'], ['Inc5b_5x5', 'Inc5b_pp'], ['FC6']]","[66, 96, 45, 21, 39, 66, 48, 36, 18, 30, 39, 27, 51, 9, 33, 48, 21, 39, 54, 24, 21, 30, 36, 21, 30, 21, 51, 21, 63]",756.4360859394073,0.005636961290888523,"[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]",92.0,2,100,51840,optimised