Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
S
Sciencebeam Gym
Manage
Activity
Members
Labels
Plan
Issues
0
Issue boards
Milestones
Iterations
Wiki
Requirements
Code
Merge requests
0
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Locked files
Build
Pipelines
Jobs
Pipeline schedules
Test cases
Artifacts
Deploy
Releases
Package Registry
Container Registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Code review analytics
Issue analytics
Insights
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Sciencebeam
Sciencebeam Gym
Commits
5432f53a
Commit
5432f53a
authored
7 years ago
by
Daniel Ecer
Browse files
Options
Downloads
Patches
Plain Diff
Added a test to confirm that the loss function supports sample-based (per-example) weights
parent
71081bfd
No related branches found
No related tags found
No related merge requests found
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
sciencebeam_gym/trainer/models/pix2pix/loss.py
+6
-7
6 additions, 7 deletions
sciencebeam_gym/trainer/models/pix2pix/loss.py
sciencebeam_gym/trainer/models/pix2pix/loss_test.py
+44
-0
44 additions, 0 deletions
sciencebeam_gym/trainer/models/pix2pix/loss_test.py
with
50 additions
and
7 deletions
sciencebeam_gym/trainer/models/pix2pix/loss.py
+
6
−
7
View file @
5432f53a
...
...
@@ -14,12 +14,11 @@ def cross_entropy_loss(labels, logits):
)
)
def weighted_cross_entropy_loss(targets, logits, pos_weight, scalar=True):
  """Weighted sigmoid cross-entropy between `targets` and `logits`.

  Args:
    targets: tensor of target values, same shape as `logits`.
    logits: tensor of unscaled log-probabilities.
    pos_weight: multiplier on the positive-target term; a scalar or a
      tensor broadcastable against the element-wise loss (e.g.
      per-sample weights -- presumably the motivation for `scalar`;
      confirm against callers).
    scalar: when True (default) reduce the element-wise loss to its
      mean; when False return the unreduced element-wise loss tensor.

  Returns:
    A scalar tensor if `scalar` is True, otherwise the element-wise
    loss tensor produced by `tf.nn.weighted_cross_entropy_with_logits`.
  """
  with tf.name_scope("weighted_cross_entropy"):
    elementwise_loss = tf.nn.weighted_cross_entropy_with_logits(
      logits=logits,
      targets=targets,
      pos_weight=pos_weight
    )
    # Guard-style: hand back the raw per-element loss when requested,
    # otherwise collapse it to a single mean value.
    if not scalar:
      return elementwise_loss
    return tf.reduce_mean(elementwise_loss)
This diff is collapsed.
Click to expand it.
sciencebeam_gym/trainer/models/pix2pix/loss_test.py
+
44
−
0
View file @
5432f53a
import
logging
from
six
import
raise_from
import
tensorflow
as
tf
...
...
@@ -9,6 +11,9 @@ from sciencebeam_gym.trainer.models.pix2pix.loss import (
weighted_cross_entropy_loss
)
def get_logger():
  """Return the logger named after this module."""
  module_logger = logging.getLogger(__name__)
  return module_logger
def
assert_close
(
a
,
b
,
atol
=
1.e-8
):
try
:
assert
np
.
allclose
([
a
],
[
b
],
atol
=
atol
)
...
...
@@ -99,3 +104,42 @@ class TestWeightedCrossEntropyLoss(object):
with
tf
.
Session
()
as
session
:
loss_1_value
,
loss_2_value
=
session
.
run
([
loss_1
,
loss_2
])
assert
loss_1_value
<
loss_2_value
def test_should_support_batch_example_pos_weights(self):
  # Method of TestWeightedCrossEntropyLoss: three identical
  # (labels, logits) examples where only pos_weight differs per example,
  # exercising weighted_cross_entropy_loss with scalar=False.
  batch_size = 3
  with tf.Graph().as_default():
    # Each example has one negative target (0.0) and one positive (1.0),
    # both predicted strongly positive (logit 10.0).
    labels = tf.constant([[0.0, 1.0]] * batch_size)
    logits = tf.constant([[10.0, 10.0]] * batch_size)
    # Per-example positive weights: example 0 is down-weighted in
    # pos_weight_1, example 1 in pos_weight_2, example 2 equal in both.
    pos_weight_1 = tf.constant([
      [0.5, 0.5],
      [1.0, 1.0],
      [1.0, 1.0]
    ])
    pos_weight_2 = tf.constant([
      [1.0, 1.0],
      [0.5, 0.5],
      [1.0, 1.0]
    ])
    # scalar=False keeps the element-wise loss so it can be reduced
    # per example below.
    loss_1 = weighted_cross_entropy_loss(labels, logits, pos_weight_1, scalar=False)
    loss_2 = weighted_cross_entropy_loss(labels, logits, pos_weight_2, scalar=False)
    # Mean over the last axis gives one loss value per example.
    loss_1_per_example = tf.reduce_mean(loss_1, axis=[-1])
    loss_2_per_example = tf.reduce_mean(loss_2, axis=[-1])
    with tf.Session() as session:
      get_logger().debug('labels=\n%s', labels.eval())
      get_logger().debug('logits=\n%s', logits.eval())
      loss_1_value, loss_2_value, loss_1_per_example_value, loss_2_per_example_value = (
        session.run([loss_1, loss_2, loss_1_per_example, loss_2_per_example])
      )
      get_logger().debug(
        '\nloss_1_value=\n%s\nloss_2_value=\n%s'
        '\nloss_1_per_example_value=\n%s\nloss_2_per_example_value=\n%s',
        loss_1_value, loss_2_value, loss_1_per_example_value, loss_2_per_example_value
      )
      # Expectation asserted: the example whose pos_weight is lower gets
      # the lower per-example loss; equal weights give equal losses.
      assert loss_1_per_example_value[0] < loss_2_per_example_value[0]
      assert loss_1_per_example_value[1] > loss_2_per_example_value[1]
      assert loss_1_per_example_value[2] == loss_2_per_example_value[2]
This diff is collapsed.
Click to expand it.
Preview
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment