watcag-public / fpga-syspart / Commits / fb06d64f

Commit fb06d64f authored Jul 02, 2020 by harry1080ti
parent 0ffec73a

square performance done

Showing 6 changed files with 74 additions and 71 deletions
optimization_algo/rScripts/Square Performance.pdf            +0 / -0
optimization_algo/rScripts/square-latency-tp_gain.r          +71 / -68
optimization_algo/rScripts/square-vs-rect.r                  +1 / -1
optimization_algo/rScripts/square_vs_rect.pdf                +0 / -0
optimization_algo/resulting_csv/cma_logmore_sq_full_evo.csv  +1 / -1
scaleSim/scale.py                                            +1 / -1
optimization_algo/rScripts/Square Performance.pdf
No preview for this file type
optimization_algo/rScripts/square-latency-tp_gain.r
...
...
@@ -10,84 +10,87 @@ ts=25
# plot_2 > latency_penalty v partition
pdf("Square Performance.pdf", height=9, width=15)

-sq_data <- read.csv("../resulting_csv/cma_logmore_sq.csv", header=TRUE, sep=",")
-sq_data_subset <- subset(sq_data, feasable==1 & target=="DRAM_cycle" & seeding_type=="allzeros")
+sq_data <- read.csv("../resulting_csv/cma_logmore_sq_full_evo.csv", header=TRUE, sep=",")
+sq_data_subset <- subset(sq_data, feasable==1 & target=="DRAM_cycle" & seeding_type=="allzeros" & popsize==500)

topo_unique <- unique(sq_data_subset$topology)
partition_unique <- unique(sq_data_subset$partitions)
res_unique <- unique(sq_data_subset$res_unit)

column_data <- data.frame(matrix(ncol=7, nrow=0))
col_headings <- c("topology", "partitions", "sq_tp_gain", "sq_latency_penalty", "tp_gain_latency_metrics", "res", "tp_partition")
names(column_data) <- col_headings

print(topo_unique)
for (topo in topo_unique) {
  topo_subset = subset(sq_data_subset, topology==topo)
  partition_unique <- unique(topo_subset$partitions)
  print(partition_unique)
  res_unique <- unique(topo_subset$res_unit)
  print(res_unique)

  column_data <- data.frame(matrix(ncol=7, nrow=0))
  col_headings <- c("topology", "partitions", "sq_tp_gain", "sq_latency_penalty", "tp_gain_latency_metrics", "res", "tp_partition")
  names(column_data) <- col_headings

  for (part in partition_unique) {
    for (res in res_unique) {
-     sq_data_uni = subset(sq_data_subset, topology==topo & partitions==part & res_unit==res)
-     if (dim(sq_data_uni)[1] != 0) {
-       sq_max_evo = max(sq_data_uni$evo_counter)
-       sq_row = which(sq_data_uni$evo_counter == sq_max_evo)
+     topo_subset_uni = subset(topo_subset, partitions==part & res_unit==res)
+     if (dim(topo_subset_uni)[1] != 0) {
+       sq_max_evo = max(topo_subset_uni$evo_counter)
+       sq_row = which(topo_subset_uni$evo_counter == sq_max_evo)[1]

        column_data <- rbind(column_data, data.frame("topology"=topo, "partitions"=part,
-         "sq_tp_gain"=sq_data_uni$tp_gain[sq_row],
-         "sq_latency_penalty"=sq_data_uni$latency_penalty[sq_row],
-         "tp_gain_latency_metrics"=sq_data_uni$tp_gain[sq_row]/sq_data_uni$latency_penalty[sq_row],
-         "res"=toString(sq_data_uni$res_unit[sq_row]),
-         "tp_partition"=sq_data_uni$tp_partition[sq_row]))
+         "sq_tp_gain"=topo_subset_uni$tp_gain[sq_row],
+         "sq_latency_penalty"=topo_subset_uni$latency_penalty[sq_row],
+         "tp_gain_latency_metrics"=topo_subset_uni$tp_gain[sq_row]/topo_subset_uni$latency_penalty[sq_row],
+         "res"=toString(topo_subset_uni$res_unit[sq_row]),
+         "tp_partition"=topo_subset_uni$tp_partition[sq_row]))
      }
    }
  }

  d <- ggplot(column_data, aes(y=as.numeric(tp_partition), x=as.numeric(partitions), group=res))
  d <- d + geom_point(size=ps, aes(color=res, shape=res))
  d <- d + geom_line(size=ls, aes(color=res))
  d <- d + labs(x="No of Partitions", y="Throughput")
  d <- d + ggtitle(paste(topo, "Square size array Throughtput by size tier"))
  d <- d + theme_bw()
  d <- d + theme(axis.text=element_text(size=ats))
  d <- d + theme(text=element_text(size=ts))
  d <- d + theme(legend.position="bottom")

  p <- ggplot(column_data, aes(y=as.numeric(sq_tp_gain), x=as.numeric(partitions), group=res))
  p <- p + geom_point(size=ps, aes(color=res, shape=res))
  p <- p + geom_line(size=ls, aes(color=res))
  p <- p + geom_hline(yintercept=1)
  p <- p + labs(x="No of Partitions", y="Throughput Gain")
  p <- p + ggtitle(paste(topo, "Square size array Throughtput Gain by size tier"))
  p <- p + theme_bw()
  p <- p + theme(axis.text=element_text(size=ats))
  p <- p + theme(text=element_text(size=ts))
  p <- p + theme(legend.position="bottom")

  q <- ggplot(column_data, aes(y=as.numeric(sq_latency_penalty), x=as.numeric(partitions), group=res))
  q <- q + geom_point(size=ps, aes(color=res, shape=res))
  q <- q + geom_line(size=ls, aes(color=res))
  q <- q + geom_hline(yintercept=1)
  q <- q + labs(x="No of Partitions", y="Latency Penalty")
  q <- q + ggtitle(paste(topo, "Square size array Latency Penalty by size tier"))
  q <- q + theme_bw()
  q <- q + theme(axis.text=element_text(size=ats))
  q <- q + theme(text=element_text(size=ts))
  q <- q + theme(legend.position="bottom")

  z <- ggplot(column_data, aes(y=as.numeric(tp_gain_latency_metrics), x=as.numeric(partitions), group=res))
  z <- z + geom_point(size=ps, aes(color=res, shape=res))
  z <- z + geom_line(size=ls, aes(color=res))
  z <- z + geom_hline(yintercept=1)
  z <- z + labs(x="No of Partitions", y="Metrics(tp/latency)")
  z <- z + ggtitle(paste(topo, "Square size array Metrics by size tier"))
  z <- z + theme_bw()
  z <- z + theme(axis.text=element_text(size=ats))
  z <- z + theme(text=element_text(size=ts))
  z <- z + theme(legend.position="bottom")

  print(d)
  print(p)
  print(q)
  print(z)
}

d <- ggplot(column_data, aes(y=as.numeric(tp_partition), x=as.numeric(partitions), group=res))
d <- d + geom_point(size=ps, aes(color=res, shape=res))
d <- d + geom_line(size=ls, aes(color=res))
d <- d + labs(x="No of Partitions", y="Throughput")
d <- d + ggtitle("Square size array Throughtput by size tier")
d <- d + theme_bw()
d <- d + theme(axis.text=element_text(size=ats))
d <- d + theme(text=element_text(size=ts))
d <- d + theme(legend.position="bottom")

p <- ggplot(column_data, aes(y=as.numeric(sq_tp_gain), x=as.numeric(partitions), group=res))
p <- p + geom_point(size=ps, aes(color=res, shape=res))
p <- p + geom_line(size=ls, aes(color=res))
p <- p + geom_hline(yintercept=1)
p <- p + labs(x="No of Partitions", y="Throughput Gain")
p <- p + ggtitle("Square size array Throughtput Gain by size tier")
p <- p + theme_bw()
p <- p + theme(axis.text=element_text(size=ats))
p <- p + theme(text=element_text(size=ts))
p <- p + theme(legend.position="bottom")

q <- ggplot(column_data, aes(y=as.numeric(sq_latency_penalty), x=as.numeric(partitions), group=res))
q <- q + geom_point(size=ps, aes(color=res, shape=res))
q <- q + geom_line(size=ls, aes(color=res))
q <- q + geom_hline(yintercept=1)
q <- q + labs(x="No of Partitions", y="Latency Penalty")
q <- q + ggtitle("Square size array Latency Penalty by size tier")
q <- q + theme_bw()
q <- q + theme(axis.text=element_text(size=ats))
q <- q + theme(text=element_text(size=ts))
q <- q + theme(legend.position="bottom")

z <- ggplot(column_data, aes(y=as.numeric(tp_gain_latency_metrics), x=as.numeric(partitions), group=res))
z <- z + geom_point(size=ps, aes(color=res, shape=res))
z <- z + geom_line(size=ls, aes(color=res))
z <- z + geom_hline(yintercept=1)
z <- z + labs(x="No of Partitions", y="Metrics(tp/latency)")
z <- z + ggtitle("Square size array Metrics by size tier")
z <- z + theme_bw()
z <- z + theme(axis.text=element_text(size=ats))
z <- z + theme(text=element_text(size=ts))
z <- z + theme(legend.position="bottom")

print(d)
print(p)
print(q)
print(z)

dev.off()
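Note: the nested loops above amount to a per-group "last generation" selection: for every (topology, partitions, res_unit) combination they keep the row with the highest evo_counter and record its throughput gain, latency penalty, and their ratio. The base-R sketch below is not part of the commit; it shows the same selection with split() and which.max(), reusing the file path and filter values from the script above (treat those as assumptions about the data layout). which.max() returns the first index of the maximum, which mirrors the "[1]" added to the which() call in this commit.

# Minimal sketch (not in the commit): last-generation row per group.
sq_data <- read.csv("../resulting_csv/cma_logmore_sq_full_evo.csv", header=TRUE, sep=",")
sq_sub  <- subset(sq_data, feasable==1 & target=="DRAM_cycle" &
                  seeding_type=="allzeros" & popsize==500)

# One data.frame per (topology, partitions, res_unit) group.
groups <- split(sq_sub, list(sq_sub$topology, sq_sub$partitions, sq_sub$res_unit), drop=TRUE)

last_evo <- do.call(rbind, lapply(groups, function(g) {
  row <- g[which.max(g$evo_counter), ]   # row of the final logged generation
  data.frame(topology = row$topology,
             partitions = row$partitions,
             sq_tp_gain = row$tp_gain,
             sq_latency_penalty = row$latency_penalty,
             tp_gain_latency_metrics = row$tp_gain / row$latency_penalty,
             res = toString(row$res_unit),
             tp_partition = row$tp_partition)
}))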
optimization_algo/rScripts/square-vs-rect.r
...
...
@@ -10,7 +10,7 @@ ts=25
# plot_2 > latency_penalty v partition
pdf(file="square_vs_rect.pdf", height=9, width=15)

-sq_data <- read.csv("../resulting_csv/cma_logmore_sq_2.csv", header=TRUE, sep=",")
+sq_data <- read.csv("../resulting_csv/cma_logmore_sq_full_evo.csv", header=TRUE, sep=",")
sq_data_subset <- subset(sq_data, feasable==1 & target=="DRAM_cycle" & seeding_type=="optimised")

data <- read.csv("../resulting_csv/cma.csv", header=TRUE, sep=",")
...
...
optimization_algo/rScripts/square_vs_rect.pdf
No preview for this file type
optimization_algo/resulting_csv/cma_logmore_sq_full_evo.csv
target, evo_counter,partitions,topology, feasable, tp_partition, latency_partition, tp_fullmap, latency_fm, tp_gain, latency_penalty, best_layer_partition, best_resource_partition, time_taken, sigma, seed, valid_sampling_percentage, trial, popsize, res_unit, seeding_type
DRAM_cycle,0,3,FasterRCNN.csv,1,8.259773170109203e-07,3632061,4.2228695412021157e-07,2368058,1.9559621933662459,1.5337719768688098,"[['Conv1', 'CB2a_1', 'CB2a_2', 'CB2a_3', 'CB2s', 'IB2b_1', 'IB2b_2', 'IB2b_3', 'IB2c_1', 'IB2c_2', 'IB2c_3', 'CB3a_1', 'CB3a_2', 'CB3a_3', 'CB3s'], ['IB3b_1', 'IB3b_2', 'IB3b_3', 'IB3c_1', 'IB3c_2', 'IB3c_3', 'IB3d_1', 'IB3d_2', 'IB3d_3', 'CB4a_1', 'CB4a_2', 'CB4a_3', 'CB4s', 'IB4b_1', 'IB4b_2'], ['IB4b_3', 'IB4c_1', 'IB4c_2', 'IB4c_3', 'IB4d_1', 'IB4d_2', 'IB4d_3', 'IB4e_1', 'IB4e_2', 'IB4e_3', 'IB4f_1', 'IB4f_2', 'IB4f_3', 'RPN_Conv1', 'RPN_Conv_bbox', 'RPN_Conv3_cls']]","[105, 48, 66]",0.05727791786193848,0.6682246239536419,"[0, 0]",22.0,2,100,17280,allzeros
DRAM_cycle,1,3,FasterRCNN.csv,1,9.160285764274702e-07,3275007,4.2228695412021157e-07,2368058,2.1692087986376825,1.3829927307523717,"[['Conv1', 'CB2a_1', 'CB2a_2', 'CB2a_3', 'CB2s', 'IB2b_1', 'IB2b_2', 'IB2b_3', 'IB2c_1', 'IB2c_2', 'IB2c_3', 'CB3a_1'], ['CB3a_2', 'CB3a_3', 'CB3s', 'IB3b_1', 'IB3b_2', 'IB3b_3', 'IB3c_1', 'IB3c_2', 'IB3c_3', 'IB3d_1'], ['IB3d_2', 'IB3d_3', 'CB4a_1', 'CB4a_2', 'CB4a_3', 'CB4s', 'IB4b_1', 'IB4b_2', 'IB4b_3', 'IB4c_1', 'IB4c_2', 'IB4c_3', 'IB4d_1', 'IB4d_2', 'IB4d_3', 'IB4e_1', 'IB4e_2', 'IB4e_3', 'IB4f_1', 'IB4f_2', 'IB4f_3', 'RPN_Conv1', 'RPN_Conv_bbox', 'RPN_Conv3_cls']]","[66, 39, 108]",0.07065033912658691,0.7054671136953392,"[0, 0]",25.0,2,100,17280,allzeros
DRAM_cycle,2,3,FasterRCNN.csv,1,9.160285764274702e-07,3275007,4.2228695412021157e-07,2368058,2.1692087986376825,1.3829927307523717,"[['Conv1', 'CB2a_1', 'CB2a_2', 'CB2a_3', 'CB2s', 'IB2b_1', 'IB2b_2', 'IB2b_3', 'IB2c_1', 'IB2c_2', 'IB2c_3', 'CB3a_1'], ['CB3a_2', 'CB3a_3', 'CB3s', 'IB3b_1', 'IB3b_2', 'IB3b_3', 'IB3c_1', 'IB3c_2', 'IB3c_3', 'IB3d_1'], ['IB3d_2', 'IB3d_3', 'CB4a_1', 'CB4a_2', 'CB4a_3', 'CB4s', 'IB4b_1', 'IB4b_2', 'IB4b_3', 'IB4c_1', 'IB4c_2', 'IB4c_3', 'IB4d_1', 'IB4d_2', 'IB4d_3', 'IB4e_1', 'IB4e_2', 'IB4e_3', 'IB4f_1', 'IB4f_2', 'IB4f_3', 'RPN_Conv1', 'RPN_Conv_bbox', 'RPN_Conv3_cls']]","[66, 39, 108]",0.10741591453552246,0.6719565027292101,"[0, 0]",63.0,2,100,17280,allzeros
scaleSim/scale.py
...
...
@@ -279,7 +279,7 @@ class scale:
print(all_arr_dim_list)

-pool = Pool(processes=12)
+pool = Pool(processes=4)

for pro in pool.imap_unordered(self.run_mp_once, all_arr_dim_list):
    self.run_name = net_name + "_" + self.dataflow + "_" + str(pro[0]) + "x" + str(pro[1])
    self.cleanup(pro)
...
...