watcag-public / fpga-syspart / Commits

Commit a28ebd22, authored Nov 02, 2020 by justinborromeo

Binary search partition size selection

parent 094d8523

Changes: 2 files
optimization_algo/approaches/cma_approach_square_size.py
...
...
@@ -164,32 +164,47 @@ class cma_approach(object):
     """
     Decide partition sizes and evaluate.
     """
     def eva_hybrid_sq(self, layer):
-        # res is a list of length # of partitions. Step size is 3
+        # res is a list corresponding to each partition. res_step is the
+        # minimum amount by which partition size increases.
         res = [self.res_step] * self.k
         latencies = []
-        # TODO Change this to a binary search.
         # max_res_unit = 1920*9*1 from sq_approach_faster
         variable_max_res_unit = self.max_res_unit
+        # Generate initial solution
+        while sum([r*r for r in res]) < variable_max_res_unit:
+            latencies, max_idx = self.find_max_latency(layer, res)
+            res[max_idx] += self.res_step
-        while pp.packingPenalty(res, self) != 0:
-            while sum([r*r for r in res]) < variable_max_res_unit:
+        # Do a binary search to find the largest packable variable_max_res_unit.
+        search_upper_bound = self.max_res_unit
+        search_lower_bound = sum([r*r for r in res])
+        while not search_upper_bound >= search_lower_bound:
+            variable_max_res_unit = \
+                int((search_upper_bound + search_lower_bound) / 2)
+            limit_reached = False
+            while not limit_reached:
                 latencies, max_idx = self.find_max_latency(layer, res)
                 res[max_idx] += self.res_step
-            variable_max_res_unit -= 100
                 # If this addition puts the solution over the limit, we need to
                 # revert the last partition addition. TODO write some code to
                 # see if we can assign the remaining units.
                 if sum([r**2 for r in res]) > variable_max_res_unit:
                     res[max_idx] -= self.res_step
+                    limit_reached = True
+            if pp.isPackable(res, self.max_pack_size):
+                # The desired max_res_unit value is greater than its current
+                # value.
+                search_lower_bound = variable_max_res_unit
+            else:
+                # The desired max_res_unit value is less than its current
+                # value.
+                search_upper_bound = variable_max_res_unit
+        # Calculate latencies of final solution.
         latencies, max_idx = self.find_max_latency(layer, res)
+        # TODO we want to penalize based on how much we had to decrease
+        # variable_max_res_unit.
+        max_res_unit_decrease = self.max_res_unit - variable_max_res_unit
-        # If all layers couldn't be packed, packingPenalty returns 0.
-        packing_penalty = pp.packingPenalty(res, self.max_pack_size)
+        packing_penalty = pp.calculatePackingPenalty(max_res_unit_decrease)
         return latencies[max_idx] + packing_penalty, latencies, res, layer
     def evaluation_top_level(self, in_val):
...
...
@@ -306,7 +321,7 @@ class cma_approach(object):
                 return False, result
             layer = self.regroup_layers(self.best_layer)
-            max_latency, latencies, res, layers = self.eva_hybrid_sq(layer)
+            max_latency, latencies, res, _ = self.eva_hybrid_sq(layer)
         else:
             if not self.filter_res(self.best_res) and not self.filter_layer(self.best_layer):
                 #print("RESULT NOT VALID")
...
...
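Note: the eva_hybrid_sq hunk above replaces the old fixed-decrement search (variable_max_res_unit -= 100 per outer iteration) with a bisection over the resource budget. Below is a minimal standalone sketch of that pattern, assuming hypothetical helpers grow_to_budget and is_packable in place of the class's find_max_latency-driven growth and pp.isPackable; it illustrates the idea only and is not the repository's code.

# Sketch only. grow_to_budget and the min-based index choice stand in for the
# find_max_latency-driven growth loop; is_packable stands in for pp.isPackable.
def grow_to_budget(sizes, step, budget):
    # Grow one partition at a time until the total square area would exceed
    # the budget, then revert the last increment (mirrors the inner loop above).
    sizes = list(sizes)
    while True:
        idx = sizes.index(min(sizes))  # placeholder for the max-latency partition
        sizes[idx] += step
        if sum(s * s for s in sizes) > budget:
            sizes[idx] -= step
            return sizes

def largest_packable_solution(k, step, max_budget, is_packable):
    # Bisect on the budget: a packable result raises the lower bound,
    # an unpackable one lowers the upper bound.
    lower, upper = k * step * step, max_budget
    best = [step] * k
    while upper - lower > step:
        mid = (lower + upper) // 2
        sizes = grow_to_budget([step] * k, step, mid)
        if is_packable(sizes):
            best, lower = sizes, mid
        else:
            upper = mid
    return best

if __name__ == "__main__":
    # Toy feasibility test: packable if the total area fits a 60x60 array.
    fits = lambda sizes: sum(s * s for s in sizes) <= 60 * 60
    print(largest_packable_solution(k=4, step=3, max_budget=1920 * 9, is_packable=fits))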
optimization_algo/approaches/packing_penalty.py
...
...
@@ -4,35 +4,19 @@ from rectpack import GuillotineBssfSas
 from matplotlib import pyplot as plt
 import matplotlib.patches as patches
-def packingPenalty(partitions, full_array):
+# Given a list of partitions (square edge lengths) and a bin edge length,
+# determine whether the partitions are packable using a Naive algorithm.
+def isPackable(partitions, full_array):
     packer = newPacker(pack_algo=MaxRectsBssf)
     for p in partitions:
         packer.add_rect(p, p)
     packer.add_bin(full_array, full_array)
     packer.pack()
-    # First and only bin
-    bin_area = packer[0].width * packer[0].height
-    packed_area = 0
-    for rect in packer[0]:
-        side_length = rect.width
-        square_area = side_length ** 2
-        packed_area += square_area
-    percentage_wasted = 100 * (bin_area - packed_area) / bin_area
-    if len(packer[0]) == len(partitions):
-        return 0  # Fully packed, no penalty
-    # Small 0.0001 offset so a fully-packed bin with non-fully packed layers
-    # isn't treated as 0-penalty.
-    return penaltyFunction(percentage_wasted) + 0.001
-PENALTY_CONSTANT = 40000
+    return len(packer[0]) == len(partitions)
-def penaltyFunction(percentage_wasted):
-    # 0 percent wasted -> 0 penalty
-    # 5 percent wasted -> 200000
-    return PENALTY_CONSTANT * percentage_wasted
+PENALTY_CONSTANT = 150
+def calculatePackingPenalty(max_res_unit_decrement):
+    return max_res_unit_decrement * PENALTY_CONSTANT
 def printPNG(partitions, full_array, filename="sth.png"):
     # print(partitions, full_array)
...
...
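Note: a brief usage sketch of the reworked helpers, assuming the repository directory is importable as a package and rectpack is installed; the square sizes, array edge, and budget reduction below are made-up example values, not taken from the repository.

# Usage sketch only; example values.
from optimization_algo.approaches import packing_penalty as pp

partitions = [30, 24, 18, 12]   # candidate square partition edge lengths
full_array = 48                 # edge length of the single bin (full array)

if pp.isPackable(partitions, full_array):
    # Every partition was placed in the bin, so no penalty applies.
    penalty = 0
else:
    # With this commit, the penalty is proportional to how far the resource
    # budget (variable_max_res_unit) had to drop below max_res_unit, rather
    # than to the wasted bin area as before.
    max_res_unit_decrease = 1920 * 9 - 3000   # example budget reduction
    penalty = pp.calculatePackingPenalty(max_res_unit_decrease)

print(penalty)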