Posted to commits@opennlp.apache.org by jo...@apache.org on 2018/12/13 09:24:27 UTC
[opennlp-sandbox] branch master updated: Add train dropout to normalizer
This is an automated email from the ASF dual-hosted git repository.
joern pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/opennlp-sandbox.git
The following commit(s) were added to refs/heads/master by this push:
new f746c57 Add train dropout to normalizer
f746c57 is described below
commit f746c578c07688c092e47d77afcbfa4f2656dea2
Author: Jörn Kottmann <jo...@apache.org>
AuthorDate: Thu Dec 13 10:24:11 2018 +0100
Add train dropout to normalizer
---
tf-ner-poc/src/main/python/normalizer/normalizer.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/tf-ner-poc/src/main/python/normalizer/normalizer.py b/tf-ner-poc/src/main/python/normalizer/normalizer.py
index b4cc674..8286ce1 100644
--- a/tf-ner-poc/src/main/python/normalizer/normalizer.py
+++ b/tf-ner-poc/src/main/python/normalizer/normalizer.py
@@ -91,6 +91,9 @@ def create_graph(mode, batch_size, encoder_nchars, max_target_length, decoder_nc
encoder_emb_inp = tf.nn.embedding_lookup(encoder_embedding_weights, encoder_char_ids_ph)
+ if "TRAIN" == mode:
+ encoder_emb_inp = tf.nn.dropout(encoder_emb_inp, 0.7)
+
encoder_emb_inp = tf.transpose(encoder_emb_inp, perm=[1, 0, 2])
encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units)
@@ -144,8 +147,6 @@ def create_graph(mode, batch_size, encoder_nchars, max_target_length, decoder_nc
outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, output_time_major=True, swap_memory=True )
- # TODO: Use attention to improve training performance ...
-
logits = outputs.rnn_output
train_prediction = outputs.sample_id
@@ -232,7 +233,7 @@ def main():
eval_sess = tf.Session(graph=eval_graph)
- for epoch in range(1):
+ for epoch in range(20):
print("Epoch " + str(epoch))
with train_graph.as_default():
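
For readers skimming the patch: the first hunk enables dropout on the encoder embeddings only while the graph is built in training mode, and the last hunk raises the number of training passes from 1 to 20. Below is a minimal sketch of the train-only dropout pattern, assuming the TensorFlow 1.x API that the surrounding tf.contrib / tf.Session code implies; the helper name apply_train_dropout is hypothetical and not part of the commit.

import tensorflow as tf  # TF 1.x, matching the tf.contrib usage in the patch

def apply_train_dropout(embedded_inputs, mode, keep_prob=0.7):
    # Apply dropout only while training; at inference time the embeddings
    # pass through unchanged. In the TF 1.x signature the second positional
    # argument of tf.nn.dropout is keep_prob, so 0.7 keeps roughly 70% of
    # the activations and zeroes out the rest.
    if mode == "TRAIN":
        return tf.nn.dropout(embedded_inputs, keep_prob)
    return embedded_inputs

# Usage mirroring the patched create_graph:
# encoder_emb_inp = apply_train_dropout(encoder_emb_inp, mode)

Restricting dropout to the TRAIN graph is the usual design choice here: the eval and inference graphs built elsewhere in normalizer.py should see the full embedding signal, while the training graph benefits from the regularization.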