Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
wise-move
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Container Registry
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
wise-lab
wise-move
Commits
f443e382
Commit
f443e382
authored
6 years ago
by
Jae Young Lee
Browse files
Options
Downloads
Patches
Plain Diff
Improved the Wait maneuver (and slightly improved the other maneuvers).
parent
558f2efd
No related branches found
Branches containing commit
No related tags found
No related merge requests found
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
options/simple_intersection/maneuvers.py
+24
-19
24 additions, 19 deletions
options/simple_intersection/maneuvers.py
with
24 additions
and
19 deletions
options/simple_intersection/maneuvers.py
+
24
−
19
View file @
f443e382
...
...
@@ -172,7 +172,7 @@ class Stop(ManeuverBase):
self
.
_penalty_in_violation
))
self
.
_LTL_preconditions
.
append
(
LTLProperty
(
"
G ( not stopped_now U in_stop_region )
"
,
1
00
,
LTLProperty
(
"
G ( not stopped_now U in_stop_region )
"
,
2
00
,
self
.
_enable_low_level_training_properties
))
self
.
_LTL_preconditions
.
append
(
...
...
@@ -228,7 +228,7 @@ class Stop(ManeuverBase):
elif
(
rd
.
speed_limit
/
5
<
self
.
_v_ref
)
and
\
(
self
.
env
.
ego
.
v
<
self
.
_v_ref
/
2
)
and
self
.
env
.
ego
.
acc
<
0
:
self
.
_extra_r_terminal
=
-
1
00
self
.
_extra_r_terminal
=
-
2
00
return
True
else
:
...
...
@@ -241,13 +241,13 @@ class Stop(ManeuverBase):
class
Wait
(
ManeuverBase
):
_reward_in_goal
=
None
_terminate_in_goal
=
Tru
e
_terminate_in_goal
=
Fals
e
def
_init_LTL_preconditions
(
self
):
self
.
_LTL_preconditions
.
append
(
LTLProperty
(
"
G ( (in_stop_region and stopped_now) U (highest_priority and intersection_is_clear))
"
,
None
,
not
self
.
_enable_low_level_training_properties
))
# not available in low-level training...
LTLProperty
(
"
G ( (in_stop_region and has_stopped_in_stop_region) U (highest_priority and intersection_is_clear))
"
,
None
,
not
self
.
_enable_low_level_training_properties
))
# not available in low-level training...
self
.
_LTL_preconditions
.
append
(
LTLProperty
(
"
G ( not (in_intersection and highest_priority and intersection_is_clear) )
"
,
...
...
@@ -255,13 +255,13 @@ class Wait(ManeuverBase):
self
.
_LTL_preconditions
.
append
(
LTLProperty
(
"
G ( in_stop_region U (highest_priority and intersection_is_clear) )
"
,
15
0
,
"
G ( in_stop_region U (highest_priority and intersection_is_clear) )
"
,
20
0
,
self
.
_enable_low_level_training_properties
))
self
.
_LTL_preconditions
.
append
(
LTLProperty
(
"
G ( (lane and target_lane) or (not lane and not target_lane) )
"
,
150
,
self
.
_enable_low_level_training_properties
))
#
self._LTL_preconditions.append(
#
LTLProperty(
#
"G ( (lane and target_lane) or (not lane and not target_lane) )",
#
150, self._enable_low_level_training_properties))
def
_init_param
(
self
):
self
.
_update_param
()
...
...
@@ -270,8 +270,10 @@ class Wait(ManeuverBase):
def
_update_param
(
self
):
if
self
.
env
.
ego
.
APs
[
'
highest_priority
'
]
and
self
.
env
.
ego
.
APs
[
'
intersection_is_clear
'
]:
self
.
_v_ref
=
rd
.
speed_limit
self
.
_extra_action_weights_flag
=
True
else
:
self
.
_v_ref
=
0
self
.
_extra_action_weights_flag
=
False
def
generate_learning_scenario
(
self
):
n_others
=
0
if
np
.
random
.
rand
()
<=
0
else
np
.
random
.
randint
(
1
,
4
)
...
...
@@ -298,17 +300,20 @@ class Wait(ManeuverBase):
self
.
env
.
ego
.
waited_count
=
np
.
random
.
randint
(
min_waited_count
,
max_waited_count
+
21
)
self
.
env
.
init_APs
(
False
)
self
.
env
.
_terminate_in_goal
=
False
self
.
_reward_in_goal
=
200
self
.
_enable_low_level_training_properties
=
True
self
.
_extra_action_weights_flag
=
Tru
e
self
.
_extra_action_weights_flag
=
Fals
e
@property
def
extra_termination_condition
(
self
):
if
self
.
_enable_low_level_training_properties
:
# activated only for the low-level training.
if
self
.
env
.
ego
.
APs
[
'
highest_priority
'
]
and
self
.
env
.
ego
.
APs
[
'
intersection_is_clear
'
]
\
and
np
.
random
.
rand
()
<=
0.1
and
self
.
env
.
ego
.
v
<=
self
.
_v_ref
/
10
\
and
self
.
env
.
ego
.
acc
<
0
:
self
.
_extra_r_terminal
=
-
100
if
self
.
env
.
ego
.
APs
[
'
highest_priority
'
]
and
\
self
.
env
.
ego
.
APs
[
'
intersection_is_clear
'
]
and
\
np
.
random
.
rand
()
<=
0.25
and
\
self
.
env
.
ego
.
v
<=
self
.
_v_ref
/
10
and
\
self
.
env
.
ego
.
acc
<
0
:
self
.
_extra_r_terminal
=
-
200
return
True
else
:
self
.
_extra_r_terminal
=
None
...
...
@@ -449,6 +454,7 @@ class ChangeLane(ManeuverBase):
self
.
_violation_penalty_in_low_level_training
=
150
self
.
_enable_low_level_training_properties
=
True
self
.
_extra_action_weights_flag
=
True
self
.
env
.
_terminate_in_goal
=
False
def
generate_validation_scenario
(
self
):
self
.
generate_scenario
(
...
...
@@ -460,6 +466,8 @@ class ChangeLane(ManeuverBase):
self
.
_reward_in_goal
=
200
self
.
_violation_penalty_in_low_level_training
=
150
self
.
_enable_low_level_training_properties
=
True
self
.
env
.
_terminate_in_goal
=
False
@staticmethod
def
_features_dim_reduction
(
features_tuple
):
...
...
@@ -511,9 +519,6 @@ class Follow(ManeuverBase):
self
.
_enable_low_level_training_properties
=
True
self
.
_extra_action_weights_flag
=
True
def
generate_validation_scenario
(
self
):
self
.
generate_learning_scenario
()
def
_init_param
(
self
):
self
.
_set_v_ref
()
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment