
English Text Classification (CNN)

The network structure is the classic TextCNN: an embedding layer followed by parallel convolution and max-pooling branches (one per filter size), whose concatenated features pass through dropout into a softmax output layer.

Data preprocessing stage (data_helpers.py):

import numpy as np
import re
import itertools
from collections import Counter


def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
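
As a quick sanity check (illustrative, not from the original post), clean_str splits contractions and pads punctuation into separate tokens, then lowercases:

# Example: contractions and punctuation become separate tokens
print(clean_str("It's a good movie, isn't it!"))
# -> it 's a good movie , is n't it !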


def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files (read as bytes, decode as UTF-8, drop the trailing empty line)
    with open(positive_data_file, "rb") as f:
        positive_examples = f.read().decode('utf-8').split('\n')[:-1]
    with open(negative_data_file, "rb") as f:
        negative_examples = f.read().decode('utf-8').split('\n')[:-1]
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
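
Note the label convention: positive examples get the one-hot vector [0, 1] and negative ones [1, 0], so np.argmax later recovers 1 for positive and 0 for negative (exactly what the eval script does with y_test). A minimal check:

import numpy as np

# One positive and one negative label, in the layout produced above
y = np.concatenate([[[0, 1]], [[1, 0]]], 0)
print(np.argmax(y, axis=1))  # -> [1 0]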


def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
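
A toy run (illustrative) shows that the last batch of an epoch can be smaller than batch_size:

# batch_iter over 10 items with batch_size=4 yields batches of 4, 4 and 2
for batch in batch_iter(list(range(10)), batch_size=4, num_epochs=1, shuffle=False):
    print(batch)
# -> [0 1 2 3], then [4 5 6 7], then [8 9]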

Before defining the model, here is the evaluation script (eval.py), which restores a trained checkpoint and classifies new sentences:

#! /usr/bin/env python

import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
import csv

# Parameters
# ==================================================

# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the positive data.")

# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "./", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Dummy flag so the script can also run under Jupyter/IPython, which passes a -f argument
tf.app.flags.DEFINE_string('f', '', 'kernel')

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")


FLAGS = tf.flags.FLAGS
print("\nParameters:")
# flag_values_dict() parses the flags and returns a {name: value} dict
for attr, value in sorted(FLAGS.flag_values_dict().items()):
    print("{}={}".format(attr.upper(), value))
print("")

# CHANGE THIS: Load data. Load your own data here
if FLAGS.eval_train:
    x_raw, y_test = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
    y_test = np.argmax(y_test, axis=1)
else:
    x_raw = ["a masterpiece four years in the making", "everything is off."]
    y_test = [1, 0]

# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))

print("\nEvaluating...\n")

# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        # input_y = graph.get_operation_by_name("input_y").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/predictions").outputs[0]

        # Generate batches for one epoch
        batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)

        # Collect the predictions here
        all_predictions = []

        for x_test_batch in batches:
            batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])

# Print accuracy if y_test is defined
if y_test is not None:
    correct_predictions = float(sum(all_predictions == y_test))
    print("Total number of test examples: {}".format(len(y_test)))
    print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))

# Save the evaluation to a csv
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
    csv.writer(f).writerows(predictions_human_readable)
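
Both the eval and training scripts lean on VocabularyProcessor to map sentences to fixed-length id sequences. A small illustration of what transform produces (TF 1.x contrib API, deprecated but what this post uses; the exact ids are illustrative):

import numpy as np
from tensorflow.contrib import learn

vp = learn.preprocessing.VocabularyProcessor(max_document_length=5)
ids = np.array(list(vp.fit_transform(["a good movie", "a bad movie"])))
print(ids)
# -> [[1 2 3 0 0]
#     [1 4 3 0 0]]   ids start at 1; 0 pads sentences shorter than max_document_length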

The eval script imports TextCNN from text_cnn; here is that model definition (text_cnn.py):

import tensorflow as tf
import numpy as np


class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(
      self, sequence_length, num_classes, vocab_size,
      embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
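
To make the tensor shapes concrete, here is a minimal smoke test (TensorFlow 1.x assumed; the vocabulary size is taken from the run log below, the sequence length is illustrative). With filter sizes 3, 4, 5 and 128 filters each, the concatenated feature vector has 3 * 128 = 384 dimensions:

import tensorflow as tf

with tf.Graph().as_default():
    cnn = TextCNN(sequence_length=56, num_classes=2, vocab_size=18758,
                  embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128)
    print(cnn.h_pool_flat)  # shape (?, 384): 3 filter sizes x 128 filters each
    print(cnn.scores)       # shape (?, 2): unnormalized class scores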

Next comes the training stage (train.py):

#! /usr/bin/env python

import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn

# Parameters
# ==================================================

# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")

# Dummy flag so the script can also run under Jupyter/IPython, which passes a -f argument
tf.app.flags.DEFINE_string('f', '', 'kernel')

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
print("\nParameters:")
# flag_values_dict() parses the flags and returns a {name: value} dict
for attr, value in sorted(FLAGS.flag_values_dict().items()):
    print("{}={}".format(attr.upper(), value))
print("")


# Data Preparation
# ==================================================

# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)

# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))

# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]

# Split train/test set
# TODO: This is very crude, should use cross-validation
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))


# Training
# ==================================================

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        cnn = TextCNN(
            sequence_length=x_train.shape[1],
            num_classes=y_train.shape[1],
            vocab_size=len(vocab_processor.vocabulary_),
            embedding_size=FLAGS.embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda)

        # Define Training procedure
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-3)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

        # Keep track of gradient values and sparsity (optional)
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
                grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.summary.merge(grad_summaries)

        # Output directory for models and summaries
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
        print("Writing to {}\n".format(out_dir))

        # Summaries for loss and accuracy
        loss_summary = tf.summary.scalar("loss", cnn.loss)
        acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)

        # Train Summaries
        train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
        train_summary_dir = os.path.join(out_dir, "summaries", "train")
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

        # Dev summaries
        dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
        dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

        # Write vocabulary
        vocab_processor.save(os.path.join(out_dir, "vocab"))

        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        def train_step(x_batch, y_batch):
            """
            A single training step
            """
            feed_dict = {
              cnn.input_x: x_batch,
              cnn.input_y: y_batch,
              cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
            }
            _, step, summaries, loss, accuracy = sess.run(
                [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            train_summary_writer.add_summary(summaries, step)

        def dev_step(x_batch, y_batch, writer=None):
            """
            Evaluates model on a dev set
            """
            feed_dict = {
              cnn.input_x: x_batch,
              cnn.input_y: y_batch,
              cnn.dropout_keep_prob: 1.0
            }
            step, summaries, loss, accuracy = sess.run(
                [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            if writer:
                writer.add_summary(summaries, step)

        # Generate batches
        batches = data_helpers.batch_iter(
            list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
        # Training loop. For each batch...
        for batch in batches:
            x_batch, y_batch = zip(*batch)
            train_step(x_batch, y_batch)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                print("\nEvaluation:")
                dev_step(x_dev, y_dev, writer=dev_summary_writer)
                print("")
            if current_step % FLAGS.checkpoint_every == 0:
                # Note: this saves checkpoints as ./-<step> in the current directory,
                # matching the eval script's default checkpoint_dir of "./"; the
                # checkpoint_prefix defined above would place them under runs/<timestamp>/checkpoints.
                path = saver.save(sess, './', global_step=current_step)
                print("Saved model checkpoint to {}\n".format(path))

Results:

Parameters:
ALLOW_SOFT_PLACEMENT=<absl.flags._flag.BooleanFlag object at 0x000000000CB90CF8>
BATCH_SIZE=<absl.flags._flag.Flag object at 0x000000000CB90AC8>
CHECKPOINT_EVERY=<absl.flags._flag.Flag object at 0x000000000CB90CC0>
DEV_SAMPLE_PERCENTAGE=<absl.flags._flag.Flag object at 0x0000000004F07710>
DROPOUT_KEEP_PROB=<absl.flags._flag.Flag object at 0x000000000CB908D0>
EMBEDDING_DIM=<absl.flags._flag.Flag object at 0x000000000CB906D8>
EVALUATE_EVERY=<absl.flags._flag.Flag object at 0x000000000CB90C18>
F=<absl.flags._flag.Flag object at 0x000000000CB90A20>
FILTER_SIZES=<absl.flags._flag.Flag object at 0x000000000CB90780>
L2_REG_LAMBDA=<absl.flags._flag.Flag object at 0x000000000CB90978>
LOG_DEVICE_PLACEMENT=<absl.flags._flag.BooleanFlag object at 0x000000000CB90DA0>
NEGATIVE_DATA_FILE=<absl.flags._flag.Flag object at 0x000000000BDDF438>
NUM_CHECKPOINTS=<absl.flags._flag.Flag object at 0x000000000CB90D68>
NUM_EPOCHS=<absl.flags._flag.Flag object at 0x000000000CB90B70>
NUM_FILTERS=<absl.flags._flag.Flag object at 0x000000000CB90828>
POSITIVE_DATA_FILE=<absl.flags._flag.Flag object at 0x000000000BDDF3C8>

Loading data...
WARNING:tensorflow:From <ipython-input-1-bbb57832f2ce>:56: VocabularyProcessor.__init__ (from tensorflow.contrib.learn.python.learn.preprocessing.text) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tensorflow/transform or tf.data.
WARNING:tensorflow:From D:\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\preprocessing\text.py:154: CategoricalVocabulary.__init__ (from tensorflow.contrib.learn.python.learn.preprocessing.categorical_vocabulary) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tensorflow/transform or tf.data.
WARNING:tensorflow:From D:\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\preprocessing\text.py:170: tokenizer (from tensorflow.contrib.learn.python.learn.preprocessing.text) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tensorflow/transform or tf.data.
Vocabulary Size: 18758
Train/Dev split: 9596/1066
WARNING:tensorflow:From C:\Users\Administrator\Desktop\text_cnn.py:79: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:

Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.

See @{tf.nn.softmax_cross_entropy_with_logits_v2}.

INFO:tensorflow:Summary name embedding/W:0/grad/hist is illegal; using embedding/W_0/grad/hist instead.
INFO:tensorflow:Summary name embedding/W:0/grad/sparsity is illegal; using embedding/W_0/grad/sparsity instead.
INFO:tensorflow:Summary name conv-maxpool-3/W:0/grad/hist is illegal; using conv-maxpool-3/W_0/grad/hist instead.
INFO:tensorflow:Summary name conv-maxpool-3/W:0/grad/sparsity is illegal; using conv-maxpool-3/W_0/grad/sparsity instead.
INFO:tensorflow:Summary name conv-maxpool-3/b:0/grad/hist is illegal; using conv-maxpool-3/b_0/grad/hist instead.
INFO:tensorflow:Summary name conv-maxpool-3/b:0/grad/sparsity is illegal; using conv-maxpool-3/b_0/grad/sparsity instead.
INFO:tensorflow:Summary name conv-maxpool-4/W:0/grad/hist is illegal; using conv-maxpool-4/W_0/grad/hist instead.
INFO:tensorflow:Summary name conv-maxpool-4/W:0/grad/sparsity is illegal; using conv-maxpool-4/W_0/grad/sparsity instead.
INFO:tensorflow:Summary name conv-maxpool-4/b:0/grad/hist is illegal; using conv-maxpool-4/b_0/grad/hist instead.
INFO:tensorflow:Summary name conv-maxpool-4/b:0/grad/sparsity is illegal; using conv-maxpool-4/b_0/grad/sparsity instead.
INFO:tensorflow:Summary name conv-maxpool-5/W:0/grad/hist is illegal; using conv-maxpool-5/W_0/grad/hist instead.
INFO:tensorflow:Summary name conv-maxpool-5/W:0/grad/sparsity is illegal; using conv-maxpool-5/W_0/grad/sparsity instead.
INFO:tensorflow:Summary name conv-maxpool-5/b:0/grad/hist is illegal; using conv-maxpool-5/b_0/grad/hist instead.
INFO:tensorflow:Summary name conv-maxpool-5/b:0/grad/sparsity is illegal; using conv-maxpool-5/b_0/grad/sparsity instead.
INFO:tensorflow:Summary name W:0/grad/hist is illegal; using W_0/grad/hist instead.
INFO:tensorflow:Summary name W:0/grad/sparsity is illegal; using W_0/grad/sparsity instead.
INFO:tensorflow:Summary name output/b:0/grad/hist is illegal; using output/b_0/grad/hist instead.
INFO:tensorflow:Summary name output/b:0/grad/sparsity is illegal; using output/b_0/grad/sparsity instead.
Writing to C:\Users\Administrator\Desktop\runs\1536738315

2018-09-12T15:45:19.915906: step 1, loss 2.49389, acc 0.4375
2018-09-12T15:45:20.265926: step 2, loss 1.79032, acc 0.53125
2018-09-12T15:45:20.564943: step 3, loss 2.14735, acc 0.4375
2018-09-12T15:45:20.908963: step 4, loss 1.4538, acc 0.53125
2018-09-12T15:45:21.233982: step 5, loss 2.09522, acc 0.546875
2018-09-12T15:45:21.547000: step 6, loss 1.6122, acc 0.515625
2018-09-12T15:45:21.883019: step 7, loss 1.7609, acc 0.5625
2018-09-12T15:45:22.201037: step 8, loss 2.31901, acc 0.40625
2018-09-12T15:45:22.543057: step 9, loss 1.61211, acc 0.546875
2018-09-12T15:45:22.969081: step 10, loss 2.31761, acc 0.390625
2018-09-12T15:45:23.376104: step 11, loss 1.6651, acc 0.546875
2018-09-12T15:45:23.835131: step 12, loss 2.12476, acc 0.515625
2018-09-12T15:45:24.247154: step 13, loss 1.81063, acc 0.515625
2018-09-12T15:45:24.580173: step 14, loss 2.00943, acc 0.53125
2018-09-12T15:45:24.899191: step 15, loss 2.44759, acc 0.40625
2018-09-12T15:45:25.220210: step 16, loss 2.05808, acc 0.5
2018-09-12T15:45:25.547228: step 17, loss 1.72565, acc 0.515625
2018-09-12T15:45:25.932250: step 18, loss 1.9766, acc 0.5625
2018-09-12T15:45:26.248269: step 19, loss 2.1489, acc 0.40625
2018-09-12T15:45:26.568287: step 20, loss 1.8569, acc 0.53125
2018-09-12T15:45:26.901306: step 21, loss 1.9758, acc 0.53125
2018-09-12T15:45:27.213324: step 22, loss 1.64478, acc 0.6875
2018-09-12T15:45:27.577345: step 23, loss 1.85216, acc 0.546875
2018-09-12T15:45:27.892363: step 24, loss 1.83535, acc 0.515625
2018-09-12T15:45:28.206381: step 25, loss 1.36567, acc 0.5625
2018-09-12T15:45:28.578402: step 26, loss 1.84147, acc 0.4375
2018-09-12T15:45:29.037027: step 27, loss 1.48389, acc 0.515625
2018-09-12T15:45:29.459051: step 28, loss 2.30555, acc 0.390625
2018-09-12T15:45:29.909077: step 29, loss 2.49438, acc 0.46875
2018-09-12T15:45:30.413106: step 30, loss 1.60412, acc 0.5
2018-09-12T15:45:30.972138: step 31, loss 2.20106, acc 0.4375
2018-09-12T15:45:31.541170: step 32, loss 1.77118, acc 0.546875
2018-09-12T15:45:32.113203: step 33, loss 1.69977, acc 0.515625
2018-09-12T15:45:32.608231: step 34, loss 1.92535, acc 0.484375
2018-09-12T15:45:33.129261: step 35, loss 2.1763, acc 0.4375
2018-09-12T15:45:33.681293: step 36, loss 1.42474, acc 0.59375
2018-09-12T15:45:34.186322: step 37, loss 2.18221, acc 0.359375
2018-09-12T15:45:34.694351: step 38, loss 1.21661, acc 0.625
2018-09-12T15:45:35.159377: step 39, loss 1.76056, acc 0.515625
2018-09-12T15:45:35.565401: step 40, loss 1.75754, acc 0.484375
2018-09-12T15:45:36.059429: step 41, loss 1.5117, acc 0.53125
2018-09-12T15:45:36.515455: step 42, loss 1.32369, acc 0.609375
2018-09-12T15:45:37.026484: step 43, loss 1.52577, acc 0.609375
2018-09-12T15:45:37.424507: step 44, loss 1.87095, acc 0.46875
2018-09-12T15:45:37.885533: step 45, loss 2.07323, acc 0.484375
2018-09-12T15:45:38.327559: step 46, loss 1.50184, acc 0.53125
2018-09-12T15:45:38.872590: step 47, loss 1.60472, acc 0.5625
2018-09-12T15:45:39.433622: step 48, loss 2.02982, acc 0.359375
2018-09-12T15:45:39.912649: step 49, loss 1.31676, acc 0.578125
2018-09-12T15:45:40.396677: step 50, loss 1.44574, acc 0.53125
2018-09-12T15:45:40.892705: step 51, loss 1.45929, acc 0.515625
2018-09-12T15:45:41.318730: step 52, loss 1.42193, acc 0.578125
2018-09-12T15:45:41.756755: step 53, loss 1.23416, acc 0.625
2018-09-12T15:45:42.246783: step 54, loss 1.52605, acc 0.5
2018-09-12T15:45:42.743811: step 55, loss 1.72285, acc 0.578125
2018-09-12T15:45:43.250840: step 56, loss 1.85015, acc 0.5
2018-09-12T15:45:43.696866: step 57, loss 1.75297, acc 0.4375
2018-09-12T15:45:44.117890: step 58, loss 2.03166, acc 0.4375
2018-09-12T15:45:44.581916: step 59, loss 1.57697, acc 0.46875
2018-09-12T15:45:45.024942: step 60, loss 1.65351, acc 0.578125
2018-09-12T15:45:45.465967: step 61, loss 1.59669, acc 0.546875
2018-09-12T15:45:45.920993: step 62, loss 1.37167, acc 0.59375
2018-09-12T15:45:46.359018: step 63, loss 1.33928, acc 0.546875
2018-09-12T15:45:46.786042: step 64, loss 1.48064, acc 0.546875
2018-09-12T15:45:47.226068: step 65, loss 1.10133, acc 0.6875
2018-09-12T15:45:47.653092: step 66, loss 1.65483, acc 0.484375
2018-09-12T15:45:48.081116: step 67, loss 1.59126, acc 0.546875
2018-09-12T15:45:48.516141: step 68, loss 1.49775, acc 0.484375
2018-09-12T15:45:48.995768: step 69, loss 1.53814, acc 0.46875
2018-09-12T15:45:49.407791: step 70, loss 1.60685, acc 0.484375
2018-09-12T15:45:49.793814: step 71, loss 1.62593, acc 0.453125
2018-09-12T15:45:50.188836: step 72, loss 1.71958, acc 0.453125
2018-09-12T15:45:50.581859: step 73, loss 1.71939, acc 0.453125
2018-09-12T15:45:50.993882: step 74, loss 1.26738, acc 0.546875
2018-09-12T15:45:51.388905: step 75, loss 1.47475, acc 0.53125
2018-09-12T15:45:51.787928: step 76, loss 1.19995, acc 0.53125
2018-09-12T15:45:52.185950: step 77, loss 1.58416, acc 0.640625
2018-09-12T15:45:52.498968: step 78, loss 1.31349, acc 0.484375
2018-09-12T15:45:52.806986: step 79, loss 1.41529, acc 0.5625
2018-09-12T15:45:53.119004: step 80, loss 1.8122, acc 0.5
2018-09-12T15:45:53.430621: step 81, loss 1.30463, acc 0.578125
2018-09-12T15:45:53.739638: step 82, loss 1.39164, acc 0.515625
2018-09-12T15:45:54.082658: step 83, loss 1.85096, acc 0.40625
2018-09-12T15:45:54.392676: step 84, loss 1.26449, acc 0.53125
2018-09-12T15:45:54.706694: step 85, loss 1.30223, acc 0.46875
2018-09-12T15:45:55.047713: step 86, loss 1.50947, acc 0.5
2018-09-12T15:45:55.436735: step 87, loss 1.4614, acc 0.484375
2018-09-12T15:45:55.825758: step 88, loss 1.63457, acc 0.5
2018-09-12T15:45:56.213780: step 89, loss 1.32075, acc 0.578125
2018-09-12T15:45:56.591802: step 90, loss 1.79241, acc 0.5
2018-09-12T15:45:56.985824: step 91, loss 0.9025, acc 0.671875
2018-09-12T15:45:57.403848: step 92, loss 1.36028, acc 0.578125
2018-09-12T15:45:57.789870: step 93, loss 1.25121, acc 0.609375
2018-09-12T15:45:58.187893: step 94, loss 1.04273, acc 0.546875
2018-09-12T15:45:58.499911: step 95, loss 1.47996, acc 0.46875
2018-09-12T15:45:58.816929: step 96, loss 1.29774, acc 0.515625
2018-09-12T15:45:59.131947: step 97, loss 1.25439, acc 0.5
2018-09-12T15:45:59.472966: step 98, loss 0.923111, acc 0.59375
2018-09-12T15:45:59.870989: step 99, loss 1.62195, acc 0.46875
2018-09-12T15:46:00.262011: step 100, loss 1.36909, acc 0.484375

Evaluation:
2018-09-12T15:46:01.525084: step 100, loss 0.806849, acc 0.566604

Saved model checkpoint to ./-100

2018-09-12T15:46:05.016883: step 101, loss 1.30262, acc 0.546875
2018-09-12T15:46:05.323900: step 102, loss 1.13589, acc 0.546875
2018-09-12T15:46:05.663118: step 103, loss 1.57138, acc 0.5
2018-09-12T15:46:05.974136: step 104, loss 1.46795, acc 0.484375
2018-09-12T15:46:06.282153: step 105, loss 1.47153, acc 0.515625
2018-09-12T15:46:06.596171: step 106, loss 1.17069, acc 0.625
2018-09-12T15:46:06.917189: step 107, loss 1.15621, acc 0.625
2018-09-12T15:46:07.274210: step 108, loss 1.38407, acc 0.515625
2018-09-12T15:46:07.642429: step 109, loss 1.38906, acc 0.53125
2018-09-12T15:46:07.976647: step 110, loss 1.42858, acc 0.578125
2018-09-12T15:46:08.324667: step 111, loss 1.7645, acc 0.46875
2018-09-12T15:46:08.864896: step 112, loss 1.77474, acc 0.40625
2018-09-12T15:46:09.331922: step 113, loss 0.871553, acc 0.609375
2018-09-12T15:46:09.738946: step 114, loss 1.77809, acc 0.546875
2018-09-12T15:46:10.160970: step 115, loss 1.1508, acc 0.453125
2018-09-12T15:46:10.582994: step 116, loss 1.4218, acc 0.515625
2018-09-12T15:46:11.016019: step 117, loss 1.76443, acc 0.484375
2018-09-12T15:46:11.458044: step 118, loss 1.6049, acc 0.484375
2018-09-12T15:46:11.857067: step 119, loss 1.15311, acc 0.625
2018-09-12T15:46:12.244089: step 120, loss 1.16616, acc 0.546875
2018-09-12T15:46:12.632111: step 121, loss 1.2782, acc 0.40625
2018-09-12T15:46:13.021133: step 122, loss 1.42503, acc 0.5625
2018-09-12T15:46:13.406155: step 123, loss 1.46773, acc 0.5
2018-09-12T15:46:13.811179: step 124, loss 1.44432, acc 0.53125
2018-09-12T15:46:14.190200: step 125, loss 1.25051, acc 0.546875
2018-09-12T15:46:14.575222: step 126, loss 1.44252, acc 0.453125
2018-09-12T15:46:14.984246: step 127, loss 1.33523, acc 0.53125
2018-09-12T15:46:15.383269: step 128, loss 1.10193, acc 0.578125
2018-09-12T15:46:15.779291: step 129, loss 1.02298, acc 0.640625
2018-09-12T15:46:16.163313: step 130, loss 1.38119, acc 0.484375
2018-09-12T15:46:16.556336: step 131, loss 1.2598, acc 0.625
2018-09-12T15:46:16.938357: step 132, loss 1.60941, acc 0.515625
2018-09-12T15:46:17.327380: step 133, loss 1.67511, acc 0.484375
2018-09-12T15:46:17.725403: step 134, loss 1.34802, acc 0.625
2018-09-12T15:46:18.121425: step 135, loss 0.751159, acc 0.671875
2018-09-12T15:46:18.562450: step 136, loss 0.972137, acc 0.640625
2018-09-12T15:46:19.006476: step 137, loss 1.03113, acc 0.609375
2018-09-12T15:46:19.436500: step 138, loss 1.07965, acc 0.578125
2018-09-12T15:46:19.878526: step 139, loss 1.04783, acc 0.546875
2018-09-12T15:46:20.333552: step 140, loss 1.29611, acc 0.5
2018-09-12T15:46:20.792578: step 141, loss 1.26341, acc 0.65625
2018-09-12T15:46:21.183600: step 142, loss 1.25453, acc 0.59375
2018-09-12T15:46:21.573623: step 143, loss 1.08882, acc 0.5
2018-09-12T15:46:21.968645: step 144, loss 0.924139, acc 0.625
2018-09-12T15:46:22.358668: step 145, loss 1.19739, acc 0.515625
2018-09-12T15:46:22.750690: step 146, loss 0.900556, acc 0.65625
2018-09-12T15:46:23.128712: step 147, loss 0.86868, acc 0.625
2018-09-12T15:46:23.515734: step 148, loss 1.18604, acc 0.515625
2018-09-12T15:46:23.911756: step 149, loss 0.933233, acc 0.5
2018-09-12T15:46:24.348781: step 150, loss 1.00867, acc 0.566667
2018-09-12T15:46:24.736804: step 151, loss 0.795481, acc 0.65625
2018-09-12T15:46:25.041821: step 152, loss 1.07659, acc 0.59375
2018-09-12T15:46:25.351839: step 153, loss 0.966723, acc 0.609375
2018-09-12T15:46:25.683858: step 154, loss 1.16317, acc 0.546875
2018-09-12T15:46:26.048879: step 155, loss 0.868276, acc 0.6875
2018-09-12T15:46:26.454902: step 156, loss 1.06672, acc 0.609375
2018-09-12T15:46:26.854925: step 157, loss 0.895443, acc 0.609375
2018-09-12T15:46:27.247947: step 158, loss 0.920482, acc 0.65625
2018-09-12T15:46:27.632969: step 159, loss 0.85355, acc 0.65625
2018-09-12T15:46:28.047993: step 160, loss 0.936558, acc 0.578125
2018-09-12T15:46:28.435015: step 161, loss 0.8483, acc 0.625
2018-09-12T15:46:28.861039: step 162, loss 1.15622, acc 0.578125
2018-09-12T15:46:29.262261: step 163, loss 0.763605, acc 0.625
2018-09-12T15:46:29.674284: step 164, loss 0.665512, acc 0.703125
2018-09-12T15:46:30.078307: step 165, loss 0.766787, acc 0.640625
2018-09-12T15:46:30.465329: step 166, loss 1.228, acc 0.515625
2018-09-12T15:46:30.874353: step 167, loss 0.981665, acc 0.640625
2018-09-12T15:46:31.277376: step 168, loss 1.04111, acc 0.53125
2018-09-12T15:46:31.682399: step 169, loss 0.674751, acc 0.71875
2018-09-12T15:46:32.060421: step 170, loss 0.850278, acc 0.609375
2018-09-12T15:46:32.462444: step 171, loss 1.00851, acc 0.625
2018-09-12T15:46:32.856466: step 172, loss 1.05961, acc 0.625
2018-09-12T15:46:33.249489: step 173, loss 0.937948, acc 0.5625
2018-09-12T15:46:33.630510: step 174, loss 0.950708, acc 0.578125
2018-09-12T15:46:34.035534: step 175, loss 0.918974, acc 0.59375
2018-09-12T15:46:34.457558: step 176, loss 1.29639, acc 0.640625
2018-09-12T15:46:34.915584: step 177, loss 1.16261, acc 0.625
2018-09-12T15:46:35.334608: step 178, loss 0.815326, acc 0.71875
2018-09-12T15:46:35.766633: step 179, loss 1.14843, acc 0.5625
2018-09-12T15:46:36.208658: step 180, loss 1.05776, acc 0.453125
2018-09-12T15:46:36.639683: step 181, loss 1.11928, acc 0.53125
2018-09-12T15:46:37.065707: step 182, loss 0.813116, acc 0.609375
2018-09-12T15:46:37.463730: step 183, loss 0.725284, acc 0.65625
2018-09-12T15:46:37.850752: step 184, loss 0.516451, acc 0.765625
2018-09-12T15:46:38.251775: step 185, loss 1.08759, acc 0.578125
2018-09-12T15:46:38.641797: step 186, loss 1.02192, acc 0.609375
2018-09-12T15:46:39.065821: step 187, loss 0.900348, acc 0.578125
2018-09-12T15:46:39.493846: step 188, loss 0.95096, acc 0.59375
2018-09-12T15:46:39.928871: step 189, loss 0.830446, acc 0.625
2018-09-12T15:46:40.341894: step 190, loss 0.788657, acc 0.640625
2018-09-12T15:46:40.758918: step 191, loss 0.501774, acc 0.78125
2018-09-12T15:46:41.183943: step 192, loss 0.959318, acc 0.640625
2018-09-12T15:46:41.566964: step 193, loss 0.821175, acc 0.546875
2018-09-12T15:46:41.963987: step 194, loss 0.758862, acc 0.71875
2018-09-12T15:46:42.344009: step 195, loss 0.717188, acc 0.625
2018-09-12T15:46:42.727031: step 196, loss 0.774551, acc 0.65625
2018-09-12T15:46:43.109053: step 197, loss 0.980916, acc 0.5625
2018-09-12T15:46:43.499273: step 198, loss 0.886681, acc 0.65625
2018-09-12T15:46:43.885295: step 199, loss 0.963223, acc 0.609375
2018-09-12T15:46:44.271317: step 200, loss 0.801583, acc 0.671875

Evaluation:
2018-09-12T15:46:45.274375: step 200, loss 0.680857, acc 0.609756

Saved model checkpoint to ./-200

2018-09-12T15:46:46.517644: step 201, loss 1.01899, acc 0.515625
2018-09-12T15:46:46.930668: step 202, loss 0.862352, acc 0.640625
2018-09-12T15:46:47.249686: step 203, loss 0.778287, acc 0.546875
2018-09-12T15:46:47.651709: step 204, loss 1.18431, acc 0.5625
2018-09-12T15:46:47.987728: step 205, loss 1.25925, acc 0.515625
2018-09-12T15:46:48.305746: step 206, loss 1.05959, acc 0.546875
2018-09-12T15:46:48.630765: step 207, loss 0.830165, acc 0.546875
2018-09-12T15:46:48.952783: step 208, loss 0.920911, acc 0.578125
2018-09-12T15:46:49.273802: step 209, loss 0.73783, acc 0.671875
2018-09-12T15:46:49.644823: step 210, loss 0.978093, acc 0.53125
2018-09-12T15:46:50.062847: step 211, loss 0.852241, acc 0.625
2018-09-12T15:46:50.561875: step 212, loss 0.714629, acc 0.625
2018-09-12T15:46:51.009901: step 213, loss 0.854205, acc 0.609375
2018-09-12T15:46:51.468927: step 214, loss 0.668772, acc 0.625
2018-09-12T15:46:51.955955: step 215, loss 0.795933, acc 0.65625
2018-09-12T15:46:52.480985: step 216, loss 1.08267, acc 0.53125
2018-09-12T15:46:52.928011: step 217, loss 0.872993, acc 0.59375
2018-09-12T15:46:53.354035: step 218, loss 0.856456, acc 0.5625
2018-09-12T15:46:53.746058: step 219, loss 0.986454, acc 0.5625
2018-09-12T15:46:54.147081: step 220, loss 0.848447, acc 0.59375
2018-09-12T15:46:54.530102: step 221, loss 0.994239, acc 0.609375
2018-09-12T15:46:54.926125: step 222, loss 0.748806, acc 0.640625
2018-09-12T15:46:55.304147: step 223, loss 0.773616, acc 0.671875
2018-09-12T15:46:55.698169: step 224, loss 1.11403, acc 0.5625
2018-09-12T15:46:56.083191: step 225, loss 1.00564, acc 0.546875
2018-09-12T15:46:56.476214: step 226, loss 0.765799, acc 0.640625
2018-09-12T15:46:56.867236: step 227, loss 0.881198, acc 0.5625
2018-09-12T15:46:57.260259: step 228, loss 0.846328, acc 0.578125
2018-09-12T15:46:57.640280: step 229, loss 0.665917, acc 0.703125
2018-09-12T15:46:58.035303: step 230, loss 0.736385, acc 0.671875
2018-09-12T15:46:58.419325: step 231, loss 1.02841, acc 0.5625
2018-09-12T15:46:58.847349: step 232, loss 0.744187, acc 0.625
2018-09-12T15:46:59.230970: step 233, loss 0.787694, acc 0.59375
2018-09-12T15:46:59.621993: step 234, loss 0.7293, acc 0.65625
2018-09-12T15:47:00.027016: step 235, loss 1.07874, acc 0.53125
2018-09-12T15:47:00.426039: step 236, loss 0.954269, acc 0.53125
2018-09-12T15:47:00.814061: step 237, loss 1.05515, acc 0.515625
2018-09-12T15:47:01.244086: step 238, loss 1.01665, acc 0.515625
2018-09-12T15:47:01.624107: step 239, loss 0.719227, acc 0.625
2018-09-12T15:47:02.012130: step 240, loss 1.10124, acc 0.578125
2018-09-12T15:47:02.408152: step 241, loss 0.829296, acc 0.640625
2018-09-12T15:47:02.795174: step 242, loss 0.859271, acc 0.640625
2018-09-12T15:47:03.191197: step 243, loss 0.863009, acc 0.546875
2018-09-12T15:47:03.574219: step 244, loss 0.883772, acc 0.546875
2018-09-12T15:47:03.960241: step 245, loss 0.724985, acc 0.703125
2018-09-12T15:47:04.355264: step 246, loss 0.971046, acc 0.578125
2018-09-12T15:47:04.737285: step 247, loss 0.981632, acc 0.5
2018-09-12T15:47:05.125308: step 248, loss 0.845374, acc 0.59375
2018-09-12T15:47:05.520330: step 249, loss 0.860781, acc 0.625
2018-09-12T15:47:05.895352: step 250, loss 0.616031, acc 0.703125
2018-09-12T15:47:06.306375: step 251, loss 0.681652, acc 0.703125
2018-09-12T15:47:06.689397: step 252, loss 0.916343, acc 0.5625
2018-09-12T15:47:07.080419: step 253, loss 0.812004, acc 0.59375
2018-09-12T15:47:07.471442: step 254, loss 0.893648, acc 0.546875
2018-09-12T15:47:07.869465: step 255, loss 0.731868, acc 0.65625
2018-09-12T15:47:08.293489: step 256, loss 0.838618, acc 0.53125
2018-09-12T15:47:08.710513: step 257, loss 0.996308, acc 0.5
2018-09-12T15:47:09.160538: step 258, loss 0.875258, acc 0.578125
2018-09-12T15:47:09.591563: step 259, loss 0.618732, acc 0.640625
2018-09-12T15:47:10.034588: step 260, loss 0.782108, acc 0.59375
2018-09-12T15:47:10.484614: step 261, loss 0.725886, acc 0.6875
2018-09-12T15:47:10.934640: step 262, loss 0.7835, acc 0.703125
2018-09-12T15:47:11.353664: step 263, loss 0.628933, acc 0.65625
2018-09-12T15:47:11.752687: step 264, loss 0.65228, acc 0.640625
2018-09-12T15:47:12.136709: step 265, loss 0.891431, acc 0.65625
2018-09-12T15:47:12.515730: step 266, loss 0.820423, acc 0.640625
2018-09-12T15:47:12.914753: step 267, loss 0.907202, acc 0.5
2018-09-12T15:47:13.306776: step 268, loss 0.734528, acc 0.671875
2018-09-12T15:47:13.700798: step 269, loss 0.72605, acc 0.59375
2018-09-12T15:47:14.082820: step 270, loss 0.72547, acc 0.65625
2018-09-12T15:47:14.475842: step 271, loss 0.876732, acc 0.625
2018-09-12T15:47:14.863865: step 272, loss 0.815156, acc 0.640625
2018-09-12T15:47:15.263887: step 273, loss 0.863658, acc 0.546875
2018-09-12T15:47:15.653910: step 274, loss 0.711897, acc 0.671875
2018-09-12T15:47:16.066933: step 275, loss 0.942091, acc 0.53125
2018-09-12T15:47:16.477957: step 276, loss 0.869725, acc 0.53125
2018-09-12T15:47:16.864979: step 277, loss 0.684652, acc 0.640625
2018-09-12T15:47:17.262002: step 278, loss 0.595302, acc 0.6875
2018-09-12T15:47:17.653024: step 279, loss 0.613131, acc 0.65625
2018-09-12T15:47:18.048047: step 280, loss 0.927496, acc 0.546875
2018-09-12T15:47:18.440069: step 281, loss 0.935434, acc 0.5625
2018-09-12T15:47:18.840092: step 282, loss 0.725202, acc 0.640625
2018-09-12T15:47:19.243714: step 283, loss 0.770841, acc 0.5625
2018-09-12T15:47:19.646737: step 284, loss 0.854201, acc 0.65625
2018-09-12T15:47:20.044760: step 285, loss 0.761475, acc 0.625
2018-09-12T15:47:20.425782: step 286, loss 0.757218, acc 0.640625
2018-09-12T15:47:20.825805: step 287, loss 0.580259, acc 0.671875
2018-09-12T15:47:21.210827: step 288, loss 0.894417, acc 0.609375
2018-09-12T15:47:21.609449: step 289, loss 0.837152, acc 0.65625
2018-09-12T15:47:21.988669: step 290, loss 0.845297, acc 0.625
2018-09-12T15:47:22.418693: step 291, loss 0.769212, acc 0.578125
2018-09-12T15:47:22.864719: step 292, loss 0.914999, acc 0.546875
2018-09-12T15:47:23.310744: step 293, loss 0.70701, acc 0.625
2018-09-12T15:47:23.736769: step 294, loss 0.719064, acc 0.625
2018-09-12T15:47:24.170793: step 295, loss 0.70359, acc 0.546875
2018-09-12T15:47:24.588817: step 296, loss 0.857538, acc 0.515625
2018-09-12T15:47:25.011842: step 297, loss 0.7015, acc 0.6875
2018-09-12T15:47:25.407864: step 298, loss 0.85426, acc 0.5625
2018-09-12T15:47:25.799887: step 299, loss 0.742403, acc 0.671875
2018-09-12T15:47:26.174908: step 300, loss 0.794692, acc 0.566667

Evaluation:
2018-09-12T15:47:27.158964: step 300, loss 0.655044, acc 0.623827

Saved model checkpoint to ./-300

2018-09-12T15:47:28.237026: step 301, loss 0.702466, acc 0.6875
2018-09-12T15:47:28.546044: step 302, loss 0.555037, acc 0.703125
2018-09-12T15:47:28.881063: step 303, loss 0.511018, acc 0.78125
2018-09-12T15:47:29.222082: step 304, loss 0.668813, acc 0.6875
2018-09-12T15:47:29.678906: step 305, loss 0.792201, acc 0.671875
2018-09-12T15:47:30.120931: step 306, loss 0.662378, acc 0.65625
2018-09-12T15:47:30.520954: step 307, loss 0.675876, acc 0.6875
2018-09-12T15:47:30.913977: step 308, loss 0.842924, acc 0.6875
2018-09-12T15:47:31.323000: step 309, loss 0.720944, acc 0.625
2018-09-12T15:47:31.715022: step 310, loss 0.65659, acc 0.546875
2018-09-12T15:47:32.096044: step 311, loss 0.71786, acc 0.609375
2018-09-12T15:47:32.488067: step 312, loss 0.79461, acc 0.625
2018-09-12T15:47:32.874089: step 313, loss 0.752464, acc 0.578125
2018-09-12T15:47:33.264111: step 314, loss 0.707372, acc 0.59375
2018-09-12T15:47:33.649133: step 315, loss 0.685286, acc 0.640625
2018-09-12T15:47:34.038155: step 316, loss 0.792827, acc 0.625
2018-09-12T15:47:34.418177: step 317, loss 0.712489, acc 0.671875
2018-09-12T15:47:34.806199: step 318, loss 0.818975, acc 0.515625
2018-09-12T15:47:35.196221: step 319, loss 0.578163, acc 0.6875
2018-09-12T15:47:35.575243: step 320, loss 0.711299, acc 0.625
2018-09-12T15:47:35.968266: step 321, loss 0.67634, acc 0.671875
2018-09-12T15:47:36.348287: step 322, loss 0.64182, acc 0.640625
2018-09-12T15:47:36.740310: step 323, loss 0.560565, acc 0.71875
2018-09-12T15:47:37.116331: step 324, loss 0.532145, acc 0.75
2018-09-12T15:47:37.509354: step 325, loss 0.641128, acc 0.640625
2018-09-12T15:47:37.893376: step 326, loss 0.60283, acc 0.6875
2018-09-12T15:47:38.287398: step 327, loss 0.697548, acc 0.578125
2018-09-12T15:47:38.719423: step 328, loss 0.838987, acc 0.53125
2018-09-12T15:47:39.162047: step 329, loss 0.6687, acc 0.65625
2018-09-12T15:47:39.582071: step 330, loss 0.622495, acc 0.671875
2018-09-12T15:47:40.033097: step 331, loss 0.564356, acc 0.703125
2018-09-12T15:47:40.486123: step 332, loss 0.791882, acc 0.578125
2018-09-12T15:47:40.926148: step 333, loss 0.80136, acc 0.59375
2018-09-12T15:47:41.311170: step 334, loss 0.632743, acc 0.625
2018-09-12T15:47:41.701193: step 335, loss 0.600452, acc 0.65625
2018-09-12T15:47:42.090215: step 336, loss 0.641725, acc 0.625
2018-09-12T15:47:42.477237: step 337, loss 0.673241, acc 0.671875
2018-09-12T15:47:42.877260: step 338, loss 0.894543, acc 0.640625
2018-09-12T15:47:43.261282: step 339, loss 0.632103, acc 0.703125
2018-09-12T15:47:43.635303: step 340, loss 0.692052, acc 0.609375
2018-09-12T15:47:44.028326: step 341, loss 0.537287, acc 0.703125
2018-09-12T15:47:44.410348: step 342, loss 0.58814, acc 0.65625
2018-09-12T15:47:44.809370: step 343, loss 0.62661, acc 0.640625
2018-09-12T15:47:45.188392: step 344, loss 0.696787, acc 0.640625
2018-09-12T15:47:45.580415: step 345, loss 0.495148, acc 0.71875
2018-09-12T15:47:45.984438: step 346, loss 0.708524, acc 0.640625
2018-09-12T15:47:46.381460: step 347, loss 0.697603, acc 0.640625
2018-09-12T15:47:46.771681: step 348, loss 0.60434, acc 0.65625
2018-09-12T15:47:47.150703: step 349, loss 0.446877, acc 0.796875
2018-09-12T15:47:47.542725: step 350, loss 0.64384, acc 0.65625
2018-09-12T15:47:47.924747: step 351, loss 0.488996, acc 0.78125
2018-09-12T15:47:48.312769: step 352, loss 0.549814, acc 0.6875
2018-09-12T15:47:48.695791: step 353, loss 0.722035, acc 0.609375
2018-09-12T15:47:49.081813: step 354, loss 0.719019, acc 0.59375
2018-09-12T15:47:49.480836: step 355, loss 0.562077, acc 0.75
2018-09-12T15:47:49.869858: step 356, loss 0.662614, acc 0.640625
2018-09-12T15:47:50.259880: step 357, loss 0.620961, acc 0.640625
2018-09-12T15:47:50.661903: step 358, loss 0.750177, acc 0.59375
2018-09-12T15:47:51.044925: step 359, loss 0.63204, acc 0.703125
2018-09-12T15:47:51.433948: step 360, loss 0.476084, acc 0.796875
2018-09-12T15:47:51.811969: step 361, loss 0.541086, acc 0.75
2018-09-12T15:47:52.199991: step 362, loss 0.732074, acc 0.578125
2018-09-12T15:47:52.578013: step 363, loss 0.77203, acc 0.59375
2018-09-12T15:47:52.981036: step 364, loss 0.655726, acc 0.75
2018-09-12T15:47:53.359058: step 365, loss 0.667714, acc 0.703125
2018-09-12T15:47:53.755080: step 366, loss 0.64514, acc 0.71875
2018-09-12T15:47:54.206106: step 367, loss 0.610726, acc 0.671875
2018-09-12T15:47:54.634131: step 368, loss 0.605087, acc 0.6875
2018-09-12T15:47:55.071156: step 369, loss 0.565114, acc 0.703125
2018-09-12T15:47:55.508181: step 370, loss 0.572259, acc 0.71875
2018-09-12T15:47:55.941205: step 371, loss 0.647532, acc 0.703125
2018-09-12T15:47:56.359229: step 372, loss 0.703514, acc 0.640625
2018-09-12T15:47:56.801255: step 373, loss 0.725625, acc 0.65625
2018-09-12T15:47:57.193277: step 374, loss 0.682408, acc 0.609375
2018-09-12T15:47:57.577299: step 375, loss 0.546929, acc 0.703125
2018-09-12T15:47:57.959321: step 376, loss 0.574238, acc 0.734375
2018-09-12T15:47:58.359344: step 377, loss 0.525268, acc 0.6875
2018-09-12T15:47:58.752366: step 378, loss 0.710251, acc 0.671875
2018-09-12T15:47:59.138987: step 379, loss 0.615362, acc 0.6875
2018-09-12T15:47:59.545011: step 380, loss 0.548961, acc 0.71875
2018-09-12T15:47:59.932033: step 381, loss 0.717864, acc 0.671875
2018-09-12T15:48:00.327055: step 382, loss 0.748914, acc 0.703125
2018-09-12T15:48:00.731079: step 383, loss 0.639689, acc 0.671875
2018-09-12T15:48:01.139102: step 384, loss 0.656053, acc 0.6875
2018-09-12T15:48:01.534124: step 385, loss 0.776444, acc 0.640625
2018-09-12T15:48:01.924147: step 386, loss 0.742875, acc 0.609375
2018-09-12T15:48:02.310169: step 387, loss 0.604627, acc 0.625
2018-09-12T15:48:02.696191: step 388, loss 0.697704, acc 0.546875
2018-09-12T15:48:03.094214: step 389, loss 0.618118, acc 0.703125
2018-09-12T15:48:03.474235: step 390, loss 0.679265, acc 0.703125
2018-09-12T15:48:03.871258: step 391, loss 0.659092, acc 0.609375
2018-09-12T15:48:04.250280: step 392, loss 0.665065, acc 0.671875
2018-09-12T15:48:04.641302: step 393, loss 0.590965, acc 0.703125
2018-09-12T15:48:05.032325: step 394, loss 0.720603, acc 0.625
2018-09-12T15:48:05.436348: step 395, loss 0.70762, acc 0.625
2018-09-12T15:48:05.827370: step 396, loss 0.665705, acc 0.625
2018-09-12T15:48:06.214392: step 397, loss 0.58737, acc 0.65625
2018-09-12T15:48:06.597414: step 398, loss 0.617295, acc 0.65625
2018-09-12T15:48:06.994437: step 399, loss 0.603833, acc 0.671875
2018-09-12T15:48:07.370458: step 400, loss 0.705091, acc 0.625

Evaluation:
2018-09-12T15:48:08.381516: step 400, loss 0.673624, acc 0.586304

Saved model checkpoint to ./-400

2018-09-12T15:48:09.532181: step 401, loss 0.693138, acc 0.6875
2018-09-12T15:48:09.853199: step 402, loss 0.681525, acc 0.65625
2018-09-12T15:48:10.229221: step 403, loss 0.598061, acc 0.71875
2018-09-12T15:48:10.593242: step 404, loss 0.61746, acc 0.703125
2018-09-12T15:48:11.170473: step 405, loss 0.62289, acc 0.671875
2018-09-12T15:48:11.620499: step 406, loss 0.604627, acc 0.6875
2018-09-12T15:48:12.059524: step 407, loss 0.64678, acc 0.640625
2018-09-12T15:48:12.477548: step 408, loss 0.781228, acc 0.546875
2018-09-12T15:48:12.909572: step 409, loss 0.745701, acc 0.515625
2018-09-12T15:48:13.300595: step 410, loss 0.688762, acc 0.640625
2018-09-12T15:48:13.690617: step 411, loss 0.71956, acc 0.59375
2018-09-12T15:48:14.078639: step 412, loss 0.661834, acc 0.65625
2018-09-12T15:48:14.472662: step 413, loss 0.702292, acc 0.59375
2018-09-12T15:48:14.860684: step 414, loss 0.663699, acc 0.640625
2018-09-12T15:48:15.267707: step 415, loss 0.675859, acc 0.65625
2018-09-12T15:48:15.643729: step 416, loss 0.661494, acc 0.671875
2018-09-12T15:48:16.046752: step 417, loss 0.715986, acc 0.5625
2018-09-12T15:48:16.438774: step 418, loss 0.640551, acc 0.65625
2018-09-12T15:48:16.827797: step 419, loss 0.751387, acc 0.609375
2018-09-12T15:48:17.222819: step 420, loss 0.566624, acc 0.6875
2018-09-12T15:48:17.607841: step 421, loss 0.624415, acc 0.65625
2018-09-12T15:48:17.994062: step 422, loss 0.707375, acc 0.640625
2018-09-12T15:48:18.394084: step 423, loss 0.576206, acc 0.6875
2018-09-12T15:48:18.776106: step 424, loss 0.619145, acc 0.65625
2018-09-12T15:48:19.185130: step 425, loss 0.577469, acc 0.734375
2018-09-12T15:48:19.584153: step 426, loss 0.647278, acc 0.609375
2018-09-12T15:48:19.989176: step 427, loss 0.814431, acc 0.53125
2018-09-12T15:48:20.394199: step 428, loss 0.585593, acc 0.71875
2018-09-12T15:48:20.840224: step 429, loss 0.569427, acc 0.671875
2018-09-12T15:48:21.314251: step 430, loss 0.512998, acc 0.703125
2018-09-12T15:48:21.797279: step 431, loss 0.643615, acc 0.65625
2018-09-12T15:48:22.296308: step 432, loss 0.588395, acc 0.703125
2018-09-12T15:48:22.798336: step 433, loss 0.498118, acc 0.8125
2018-09-12T15:48:23.270363: step 434, loss 0.56226, acc 0.75
2018-09-12T15:48:23.764392: step 435, loss 0.764844, acc 0.625
2018-09-12T15:48:24.258420: step 436, loss 0.697622, acc 0.5625
2018-09-12T15:48:24.735447: step 437, loss 0.566848, acc 0.671875
2018-09-12T15:48:25.260477: step 438, loss 0.634771, acc 0.71875
2018-09-12T15:48:25.705503: step 439, loss 0.519715, acc 0.71875
2018-09-12T15:48:26.112526: step 440, loss 0.835298, acc 0.5625
2018-09-12T15:48:26.521549: step 441, loss 0.580625, acc 0.703125
2018-09-12T15:48:26.959574: step 442, loss 0.571456, acc 0.75
2018-09-12T15:48:27.393599: step 443, loss 0.644931, acc 0.578125
2018-09-12T15:48:27.797622: step 444, loss 0.560496, acc 0.671875
2018-09-12T15:48:28.268046: step 445, loss 0.710735, acc 0.625
2018-09-12T15:48:28.688070: step 446, loss 0.704413, acc 0.703125
2018-09-12T15:48:29.136695: step 447, loss 0.762198, acc 0.640625
2018-09-12T15:48:29.543718: step 448, loss 0.652623, acc 0.59375
2018-09-12T15:48:29.949741: step 449, loss 0.582949, acc 0.65625
2018-09-12T15:48:30.322762: step 450, loss 0.736841, acc 0.7
2018-09-12T15:48:30.723785: step 451, loss 0.532254, acc 0.78125
2018-09-12T15:48:31.107807: step 452, loss 0.687602, acc 0.6875
2018-09-12T15:48:31.520831: step 453, loss 0.68666, acc 0.65625
2018-09-12T15:48:31.960856: step 454, loss 0.562967, acc 0.734375
2018-09-12T15:48:32.368879: step 455, loss 0.596524, acc 0.65625
2018-09-12T15:48:32.776903: step 456, loss 0.55441, acc 0.78125
2018-09-12T15:48:33.155924: step 457, loss 0.397466, acc 0.875
2018-09-12T15:48:33.561948: step 458, loss 0.484053, acc 0.765625
2018-09-12T15:48:34.014974: step 459, loss 0.626437, acc 0.640625
2018-09-12T15:48:34.415997: step 460, loss 0.6132, acc 0.71875
2018-09-12T15:48:34.799018: step 461, loss 0.520161, acc 0.640625
2018-09-12T15:48:35.197041: step 462, loss 0.468494, acc 0.78125
2018-09-12T15:48:35.585063: step 463, loss 0.465772, acc 0.765625
2018-09-12T15:48:35.974086: step 464, loss 0.574674, acc 0.671875
2018-09-12T15:48:36.407110: step 465, loss 0.555634, acc 0.71875
2018-09-12T15:48:36.805133: step 466, loss 0.578504, acc 0.6875
2018-09-12T15:48:37.206156: step 467, loss 0.531779, acc 0.796875
2018-09-12T15:48:37.596178: step 468, loss 0.558302, acc 0.71875
2018-09-12T15:48:37.991201: step 469, loss 0.627819, acc 0.671875
2018-09-12T15:48:38.383223: step 470, loss 0.480225, acc 0.75
2018-09-12T15:48:38.776246: step 471, loss 0.566512, acc 0.71875
2018-09-12T15:48:39.164268: step 472, loss 0.469412, acc 0.75
2018-09-12T15:48:39.571291: step 473, loss 0.562283, acc 0.6875
2018-09-12T15:48:39.958314: step 474, loss 0.428437, acc 0.828125
2018-09-12T15:48:40.355336: step 475, loss 0.550738, acc 0.703125
2018-09-12T15:48:40.741358: step 476, loss 0.659382, acc 0.625
2018-09-12T15:48:41.132381: step 477, loss 0.630113, acc 0.65625
2018-09-12T15:48:41.530403: step 478, loss 0.56852, acc 0.703125
2018-09-12T15:48:41.912425: step 479, loss 0.719464, acc 0.625
2018-09-12T15:48:42.365451: step 480, loss 0.514384, acc 0.765625
2018-09-12T15:48:42.827478: step 481, loss 0.614439, acc 0.625
2018-09-12T15:48:43.262503: step 482, loss 0.605202, acc 0.65625
2018-09-12T15:48:43.707528: step 483, loss 0.528062, acc 0.734375
2018-09-12T15:48:44.137553: step 484, loss 0.56153, acc 0.6875
2018-09-12T15:48:44.566577: step 485, loss 0.544798, acc 0.703125
2018-09-12T15:48:44.991601: step 486, loss 0.622605, acc 0.6875
2018-09-12T15:48:45.404625: step 487, loss 0.473696, acc 0.734375
2018-09-12T15:48:45.800648: step 488, loss 0.54352, acc 0.6875
2018-09-12T15:48:46.205671: step 489, loss 0.61306, acc 0.75
2018-09-12T15:48:46.677698: step 490, loss 0.439648, acc 0.828125
2018-09-12T15:48:47.153725: step 491, loss 0.527576, acc 0.734375
2018-09-12T15:48:47.636753: step 492, loss 0.487162, acc 0.75
2018-09-12T15:48:48.067777: step 493, loss 0.464346, acc 0.828125
2018-09-12T15:48:48.528804: step 494, loss 0.631396, acc 0.65625
2018-09-12T15:48:48.999831: step 495, loss 0.509301, acc 0.796875
2018-09-12T15:48:49.459857: step 496, loss 0.470283, acc 0.78125
2018-09-12T15:48:49.926884: step 497, loss 0.478752, acc 0.765625
2018-09-12T15:48:50.344908: step 498, loss 0.62921, acc 0.65625
2018-09-12T15:48:50.838936: step 499, loss 0.678735, acc 0.609375
2018-09-12T15:48:51.275961: step 500, loss 0.566538, acc 0.671875

Evaluation:
2018-09-12T15:48:52.409026: step 500, loss 0.620875, acc 0.659475

Saved model checkpoint to ./-500

2018-09-12T15:48:53.801105: step 501, loss 0.63214, acc 0.71875
2018-09-12T15:48:54.165126: step 502, loss 0.572067, acc 0.671875
2018-09-12T15:48:54.547148: step 503, loss 0.581592, acc 0.640625
2018-09-12T15:48:54.976173: step 504, loss 0.546877, acc 0.703125
2018-09-12T15:48:55.388196: step 505, loss 0.532068, acc 0.78125
2018-09-12T15:48:55.796219: step 506, loss 0.522352, acc 0.75
2018-09-12T15:48:56.188242: step 507, loss 0.854206, acc 0.5625
2018-09-12T15:48:56.591265: step 508, loss 0.478766, acc 0.78125
2018-09-12T15:48:56.978287: step 509, loss 0.490403, acc 0.734375
2018-09-12T15:48:57.381310: step 510, loss 0.712418, acc 0.578125
2018-09-12T15:48:57.786333: step 511, loss 0.427498, acc 0.8125
2018-09-12T15:48:58.203357: step 512, loss 0.55661, acc 0.71875
2018-09-12T15:48:58.671384: step 513, loss 0.666029, acc 0.671875
2018-09-12T15:48:59.102008: step 514, loss 0.528724, acc 0.765625
2018-09-12T15:48:59.562034: step 515, loss 0.483766, acc 0.765625
2018-09-12T15:49:00.010060: step 516, loss 0.603019, acc 0.6875
2018-09-12T15:49:00.458085: step 517, loss 0.652463, acc 0.640625
2018-09-12T15:49:00.909111: step 518, loss 0.598586, acc 0.609375
2018-09-12T15:49:01.350136: step 519, loss 0.53798, acc 0.6875
2018-09-12T15:49:01.747159: step 520, loss 0.642094, acc 0.625
2018-09-12T15:49:02.137181: step 521, loss 0.582867, acc 0.671875
2018-09-12T15:49:02.547804: step 522, loss 0.574805, acc 0.6875
2018-09-12T15:49:02.961828: step 523, loss 0.707603, acc 0.640625
2018-09-12T15:49:03.361850: step 524, loss 0.501878, acc 0.734375
2018-09-12T15:49:03.754873: step 525, loss 0.586237, acc 0.71875
2018-09-12T15:49:04.146895: step 526, loss 0.560284, acc 0.71875
2018-09-12T15:49:04.537918: step 527, loss 0.510724, acc 0.734375
2018-09-12T15:49:04.938941: step 528, loss 0.552169, acc 0.6875
2018-09-12T15:49:05.331963: step 529, loss 0.552808, acc 0.703125
2018-09-12T15:49:05.728986: step 530, loss 0.529253, acc 0.765625
2018-09-12T15:49:06.133009: step 531, loss 0.547549, acc 0.765625
2018-09-12T15:49:06.519031: step 532, loss 0.576449, acc 0.65625
2018-09-12T15:49:06.915054: step 533, loss 0.548076, acc 0.6875
2018-09-12T15:49:07.304076: step 534, loss 0.606627, acc 0.6875
2018-09-12T15:49:07.704297: step 535, loss 0.601939, acc 0.703125
2018-09-12T15:49:08.090319: step 536, loss 0.680477, acc 0.609375
2018-09-12T15:49:08.489342: step 537, loss 0.51362, acc 0.6875
2018-09-12T15:49:08.888365: step 538, loss 0.538561, acc 0.703125
2018-09-12T15:49:09.284987: step 539, loss 0.641373, acc 0.65625
2018-09-12T15:49:09.692010: step 540, loss 0.602649, acc 0.671875
2018-09-12T15:49:10.079032: step 541, loss 0.476035, acc 0.71875
2018-09-12T15:49:10.471054: step 542, loss 0.516154, acc 0.703125
2018-09-12T15:49:10.870077: step 543, loss 0.713532, acc 0.578125
2018-09-12T15:49:11.280101: step 544, loss 0.517968, acc 0.71875
2018-09-12T15:49:11.675123: step 545, loss 0.592677, acc 0.625
2018-09-12T15:49:12.079146: step 546, loss 0.714773, acc 0.609375
2018-09-12T15:49:12.482169: step 547, loss 0.549371, acc 0.734375
2018-09-12T15:49:12.891193: step 548, loss 0.553711, acc 0.671875
2018-09-12T15:49:13.317217: step 549, loss 0.582239, acc 0.703125
2018-09-12T15:49:13.710240: step 550, loss 0.637763, acc 0.65625
2018-09-12T15:49:14.120263: step 551, loss 0.49066, acc 0.796875
2018-09-12T15:49:14.521286: step 552, loss 0.503832, acc 0.796875
2018-09-12T15:49:14.919309: step 553, loss 0.469644, acc 0.765625
2018-09-12T15:49:15.316332: step 554, loss 0.488744, acc 0.765625
2018-09-12T15:49:15.727355: step 555, loss 0.50319, acc 0.703125
2018-09-12T15:49:16.194382: step 556, loss 0.513346, acc 0.703125
2018-09-12T15:49:16.809417: step 557, loss 0.52764, acc 0.71875
2018-09-12T15:49:17.357448: step 558, loss 0.467611, acc 0.78125
2018-09-12T15:49:17.805474: step 559, loss 0.670106, acc 0.59375
2018-09-12T15:49:18.250499: step 560, loss 0.716642, acc 0.625
2018-09-12T15:49:18.696525: step 561, loss 0.487873, acc 0.75
2018-09-12T15:49:19.129149: step 562, loss 0.662543, acc 0.703125
2018-09-12T15:49:19.575174: step 563, loss 0.521484, acc 0.71875
2018-09-12T15:49:20.100204: step 564, loss 0.534643, acc 0.765625
2018-09-12T15:49:20.497227: step 565, loss 0.569956, acc 0.71875
2018-09-12T15:49:20.942253: step 566, loss 0.457506, acc 0.8125
2018-09-12T15:49:21.392278: step 567, loss 0.593517, acc 0.71875
2018-09-12T15:49:21.897307: step 568, loss 0.57015, acc 0.71875
2018-09-12T15:49:22.306331: step 569, loss 0.542534, acc 0.734375
2018-09-12T15:49:22.752356: step 570, loss 0.560208, acc 0.71875
2018-09-12T15:49:23.217383: step 571, loss 0.546435, acc 0.734375
2018-09-12T15:49:23.686409: step 572, loss 0.787353, acc 0.5625
2018-09-12T15:49:24.179438: step 573, loss 0.543824, acc 0.703125
2018-09-12T15:49:24.664465: step 574, loss 0.603615, acc 0.734375
2018-09-12T15:49:25.131492: step 575, loss 0.566546, acc 0.71875
2018-09-12T15:49:25.575518: step 576, loss 0.58623, acc 0.671875
2018-09-12T15:49:26.038544: step 577, loss 0.614284, acc 0.734375
2018-09-12T15:49:26.498570: step 578, loss 0.548277, acc 0.703125
2018-09-12T15:49:26.954596: step 579, loss 0.540906, acc 0.765625
2018-09-12T15:49:27.389621: step 580, loss 0.57684, acc 0.703125
2018-09-12T15:49:27.863648: step 581, loss 0.639059, acc 0.640625
2018-09-12T15:49:28.329675: step 582, loss 0.57337, acc 0.71875
2018-09-12T15:49:28.780701: step 583, loss 0.557845, acc 0.71875
2018-09-12T15:49:29.218726: step 584, loss 0.554185, acc 0.71875
2018-09-12T15:49:29.617749: step 585, loss 0.609531, acc 0.703125
2018-09-12T15:49:30.003969: step 586, loss 0.535609, acc 0.671875
2018-09-12T15:49:30.405992: step 587, loss 0.551034, acc 0.734375
2018-09-12T15:49:30.869019: step 588, loss 0.461245, acc 0.796875
2018-09-12T15:49:31.320044: step 589, loss 0.531395, acc 0.75
2018-09-12T15:49:31.787071: step 590, loss 0.694262, acc 0.65625
2018-09-12T15:49:32.209492: step 591, loss 0.585293, acc 0.71875
2018-09-12T15:49:32.653517: step 592, loss 0.478612, acc 0.734375
2018-09-12T15:49:33.071541: step 593, loss 0.623921, acc 0.640625
2018-09-12T15:49:33.466564: step 594, loss 0.569398, acc 0.75
2018-09-12T15:49:33.877587: step 595, loss 0.546215, acc 0.734375
2018-09-12T15:49:34.269610: step 596, loss 0.661834, acc 0.65625
2018-09-12T15:49:34.681633: step 597, loss 0.478523, acc 0.734375
2018-09-12T15:49:35.068655: step 598, loss 0.556543, acc 0.71875
2018-09-12T15:49:35.471678: step 599, loss 0.633977, acc 0.59375
2018-09-12T15:49:35.845700: step 600, loss 0.535138, acc 0.716667

Evaluation:
2018-09-12T15:49:36.902760: step 600, loss 0.643465, acc 0.620075

Saved model checkpoint to ./-600
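
A side note on the step-600 line just above: its accuracy 0.716667 is a multiple of 1/60 (43/60), while every other training line is a multiple of 1/64. The same recurs below at steps 750, 900, 1050, 1200 and 1350, i.e. every 150 steps: that is the short final batch of an epoch. With batch_size 64 this implies 150 batches per epoch with the last batch holding 60 examples, so the training split has about 149 × 64 + 60 = 9,596 sentences.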

2018-09-12T15:49:39.365099: step 601, loss 0.496896, acc 0.734375
2018-09-12T15:49:39.706119: step 602, loss 0.634178, acc 0.71875
2018-09-12T15:49:40.037138: step 603, loss 0.457794, acc 0.796875
2018-09-12T15:49:40.384158: step 604, loss 0.640122, acc 0.5625
2018-09-12T15:49:40.785181: step 605, loss 0.603289, acc 0.703125
2018-09-12T15:49:41.175203: step 606, loss 0.538991, acc 0.734375
2018-09-12T15:49:41.590227: step 607, loss 0.446445, acc 0.765625
2018-09-12T15:49:41.984249: step 608, loss 0.589652, acc 0.703125
2018-09-12T15:49:42.389272: step 609, loss 0.603569, acc 0.65625
2018-09-12T15:49:42.778295: step 610, loss 0.444888, acc 0.796875
2018-09-12T15:49:43.173317: step 611, loss 0.578356, acc 0.734375
2018-09-12T15:49:43.560339: step 612, loss 0.543968, acc 0.765625
2018-09-12T15:49:43.961362: step 613, loss 0.555662, acc 0.734375
2018-09-12T15:49:44.369386: step 614, loss 0.550192, acc 0.734375
2018-09-12T15:49:44.762408: step 615, loss 0.508725, acc 0.765625
2018-09-12T15:49:45.152430: step 616, loss 0.597794, acc 0.671875
2018-09-12T15:49:45.553453: step 617, loss 0.546237, acc 0.65625
2018-09-12T15:49:45.939475: step 618, loss 0.468664, acc 0.734375
2018-09-12T15:49:46.356499: step 619, loss 0.527814, acc 0.71875
2018-09-12T15:49:46.813525: step 620, loss 0.462536, acc 0.78125
2018-09-12T15:49:47.257551: step 621, loss 0.433635, acc 0.828125
2018-09-12T15:49:47.688575: step 622, loss 0.507168, acc 0.703125
2018-09-12T15:49:48.130601: step 623, loss 0.464956, acc 0.828125
2018-09-12T15:49:48.569626: step 624, loss 0.506205, acc 0.75
2018-09-12T15:49:49.016651: step 625, loss 0.461695, acc 0.734375
2018-09-12T15:49:49.434274: step 626, loss 0.44731, acc 0.734375
2018-09-12T15:49:49.841298: step 627, loss 0.414613, acc 0.78125
2018-09-12T15:49:50.226320: step 628, loss 0.487165, acc 0.734375
2018-09-12T15:49:50.631343: step 629, loss 0.451007, acc 0.796875
2018-09-12T15:49:51.022365: step 630, loss 0.699319, acc 0.640625
2018-09-12T15:49:51.430389: step 631, loss 0.482139, acc 0.765625
2018-09-12T15:49:51.820411: step 632, loss 0.567543, acc 0.703125
2018-09-12T15:49:52.213433: step 633, loss 0.474056, acc 0.75
2018-09-12T15:49:52.604456: step 634, loss 0.532992, acc 0.765625
2018-09-12T15:49:53.010479: step 635, loss 0.450645, acc 0.78125
2018-09-12T15:49:53.391501: step 636, loss 0.536559, acc 0.78125
2018-09-12T15:49:53.780523: step 637, loss 0.621771, acc 0.640625
2018-09-12T15:49:54.181546: step 638, loss 0.379087, acc 0.828125
2018-09-12T15:49:54.566568: step 639, loss 0.620981, acc 0.703125
2018-09-12T15:49:54.948590: step 640, loss 0.538292, acc 0.6875
2018-09-12T15:49:55.351613: step 641, loss 0.39197, acc 0.84375
2018-09-12T15:49:55.745635: step 642, loss 0.498575, acc 0.796875
2018-09-12T15:49:56.137658: step 643, loss 0.491556, acc 0.75
2018-09-12T15:49:56.530680: step 644, loss 0.450688, acc 0.8125
2018-09-12T15:49:56.935703: step 645, loss 0.434702, acc 0.8125
2018-09-12T15:49:57.362728: step 646, loss 0.395237, acc 0.859375
2018-09-12T15:49:57.760751: step 647, loss 0.469612, acc 0.765625
2018-09-12T15:49:58.227777: step 648, loss 0.597879, acc 0.734375
2018-09-12T15:49:58.718805: step 649, loss 0.46884, acc 0.78125
2018-09-12T15:49:59.124428: step 650, loss 0.486138, acc 0.6875
2018-09-12T15:49:59.547051: step 651, loss 0.485021, acc 0.765625
2018-09-12T15:49:59.937073: step 652, loss 0.571777, acc 0.703125
2018-09-12T15:50:00.338096: step 653, loss 0.465792, acc 0.84375
2018-09-12T15:50:00.729119: step 654, loss 0.391954, acc 0.8125
2018-09-12T15:50:01.123141: step 655, loss 0.562783, acc 0.65625
2018-09-12T15:50:01.518763: step 656, loss 0.469524, acc 0.78125
2018-09-12T15:50:01.929386: step 657, loss 0.449268, acc 0.765625
2018-09-12T15:50:02.345409: step 658, loss 0.489128, acc 0.75
2018-09-12T15:50:02.777434: step 659, loss 0.372638, acc 0.828125
2018-09-12T15:50:03.210459: step 660, loss 0.555671, acc 0.671875
2018-09-12T15:50:03.648484: step 661, loss 0.44129, acc 0.75
2018-09-12T15:50:04.087509: step 662, loss 0.453523, acc 0.796875
2018-09-12T15:50:04.541733: step 663, loss 0.555745, acc 0.71875
2018-09-12T15:50:04.977357: step 664, loss 0.551771, acc 0.671875
2018-09-12T15:50:05.383381: step 665, loss 0.625009, acc 0.6875
2018-09-12T15:50:05.779403: step 666, loss 0.420984, acc 0.828125
2018-09-12T15:50:06.173426: step 667, loss 0.566168, acc 0.734375
2018-09-12T15:50:06.565448: step 668, loss 0.502218, acc 0.75
2018-09-12T15:50:06.951470: step 669, loss 0.510825, acc 0.796875
2018-09-12T15:50:07.348493: step 670, loss 0.420184, acc 0.8125
2018-09-12T15:50:07.807519: step 671, loss 0.702553, acc 0.59375
2018-09-12T15:50:08.241544: step 672, loss 0.523201, acc 0.734375
2018-09-12T15:50:08.695570: step 673, loss 0.467874, acc 0.78125
2018-09-12T15:50:09.124595: step 674, loss 0.558885, acc 0.71875
2018-09-12T15:50:09.577620: step 675, loss 0.571261, acc 0.71875
2018-09-12T15:50:10.063648: step 676, loss 0.511319, acc 0.796875
2018-09-12T15:50:10.593679: step 677, loss 0.472051, acc 0.765625
2018-09-12T15:50:11.076706: step 678, loss 0.509064, acc 0.75
2018-09-12T15:50:11.525732: step 679, loss 0.62212, acc 0.65625
2018-09-12T15:50:11.997759: step 680, loss 0.457892, acc 0.8125
2018-09-12T15:50:12.474786: step 681, loss 0.635507, acc 0.609375
2018-09-12T15:50:12.947813: step 682, loss 0.417304, acc 0.859375
2018-09-12T15:50:13.374838: step 683, loss 0.500436, acc 0.765625
2018-09-12T15:50:13.821863: step 684, loss 0.572066, acc 0.6875
2018-09-12T15:50:14.264889: step 685, loss 0.438479, acc 0.765625
2018-09-12T15:50:14.728915: step 686, loss 0.531728, acc 0.703125
2018-09-12T15:50:15.134938: step 687, loss 0.594062, acc 0.78125
2018-09-12T15:50:15.591964: step 688, loss 0.511104, acc 0.75
2018-09-12T15:50:16.061991: step 689, loss 0.480315, acc 0.8125
2018-09-12T15:50:16.541019: step 690, loss 0.571674, acc 0.6875
2018-09-12T15:50:17.040047: step 691, loss 0.445425, acc 0.75
2018-09-12T15:50:17.443070: step 692, loss 0.642602, acc 0.625
2018-09-12T15:50:17.851094: step 693, loss 0.371625, acc 0.796875
2018-09-12T15:50:18.326121: step 694, loss 0.517251, acc 0.75
2018-09-12T15:50:18.821149: step 695, loss 0.490238, acc 0.703125
2018-09-12T15:50:19.339179: step 696, loss 0.464945, acc 0.765625
2018-09-12T15:50:19.809206: step 697, loss 0.397175, acc 0.84375
2018-09-12T15:50:20.288233: step 698, loss 0.497662, acc 0.765625
2018-09-12T15:50:20.798262: step 699, loss 0.589034, acc 0.71875
2018-09-12T15:50:21.279290: step 700, loss 0.476766, acc 0.8125

Evaluation:
2018-09-12T15:50:22.438356: step 700, loss 0.603942, acc 0.67636

Saved model checkpoint to ./-700

2018-09-12T15:50:24.304463: step 701, loss 0.446994, acc 0.734375
2018-09-12T15:50:24.678484: step 702, loss 0.482787, acc 0.765625
2018-09-12T15:50:25.176513: step 703, loss 0.529855, acc 0.765625
2018-09-12T15:50:25.584536: step 704, loss 0.477696, acc 0.78125
2018-09-12T15:50:25.968558: step 705, loss 0.561393, acc 0.734375
2018-09-12T15:50:26.361580: step 706, loss 0.552834, acc 0.75
2018-09-12T15:50:26.775604: step 707, loss 0.50916, acc 0.71875
2018-09-12T15:50:27.177627: step 708, loss 0.491361, acc 0.734375
2018-09-12T15:50:27.558649: step 709, loss 0.626138, acc 0.6875
2018-09-12T15:50:27.966672: step 710, loss 0.563533, acc 0.703125
2018-09-12T15:50:28.361695: step 711, loss 0.432522, acc 0.8125
2018-09-12T15:50:28.760718: step 712, loss 0.476165, acc 0.71875
2018-09-12T15:50:29.159340: step 713, loss 0.473247, acc 0.828125
2018-09-12T15:50:29.550362: step 714, loss 0.422729, acc 0.796875
2018-09-12T15:50:29.934384: step 715, loss 0.538889, acc 0.71875
2018-09-12T15:50:30.324406: step 716, loss 0.573443, acc 0.703125
2018-09-12T15:50:30.726429: step 717, loss 0.559931, acc 0.703125
2018-09-12T15:50:31.110451: step 718, loss 0.503176, acc 0.6875
2018-09-12T15:50:31.494473: step 719, loss 0.446655, acc 0.8125
2018-09-12T15:50:31.905497: step 720, loss 0.455508, acc 0.8125
2018-09-12T15:50:32.287519: step 721, loss 0.557054, acc 0.734375
2018-09-12T15:50:32.691542: step 722, loss 0.508909, acc 0.75
2018-09-12T15:50:33.190570: step 723, loss 0.469994, acc 0.796875
2018-09-12T15:50:33.614594: step 724, loss 0.5935, acc 0.703125
2018-09-12T15:50:34.074819: step 725, loss 0.660167, acc 0.6875
2018-09-12T15:50:34.546846: step 726, loss 0.56163, acc 0.6875
2018-09-12T15:50:35.056875: step 727, loss 0.50249, acc 0.6875
2018-09-12T15:50:35.562904: step 728, loss 0.403694, acc 0.828125
2018-09-12T15:50:36.085934: step 729, loss 0.571357, acc 0.71875
2018-09-12T15:50:36.603964: step 730, loss 0.583699, acc 0.625
2018-09-12T15:50:37.098992: step 731, loss 0.568951, acc 0.734375
2018-09-12T15:50:37.599021: step 732, loss 0.515012, acc 0.78125
2018-09-12T15:50:38.065047: step 733, loss 0.502753, acc 0.78125
2018-09-12T15:50:38.503072: step 734, loss 0.458943, acc 0.796875
2018-09-12T15:50:39.010101: step 735, loss 0.467813, acc 0.8125
2018-09-12T15:50:39.427125: step 736, loss 0.609021, acc 0.640625
2018-09-12T15:50:39.885151: step 737, loss 0.468123, acc 0.765625
2018-09-12T15:50:40.361179: step 738, loss 0.466372, acc 0.78125
2018-09-12T15:50:40.790203: step 739, loss 0.404611, acc 0.75
2018-09-12T15:50:41.240229: step 740, loss 0.522986, acc 0.734375
2018-09-12T15:50:41.725257: step 741, loss 0.492375, acc 0.734375
2018-09-12T15:50:42.265287: step 742, loss 0.454247, acc 0.765625
2018-09-12T15:50:42.723314: step 743, loss 0.54023, acc 0.78125
2018-09-12T15:50:43.140338: step 744, loss 0.664519, acc 0.6875
2018-09-12T15:50:43.624365: step 745, loss 0.495251, acc 0.8125
2018-09-12T15:50:44.049390: step 746, loss 0.374536, acc 0.875
2018-09-12T15:50:44.548616: step 747, loss 0.428217, acc 0.75
2018-09-12T15:50:44.971641: step 748, loss 0.451802, acc 0.828125
2018-09-12T15:50:45.447668: step 749, loss 0.556866, acc 0.765625
2018-09-12T15:50:45.826689: step 750, loss 0.427439, acc 0.8
2018-09-12T15:50:46.225712: step 751, loss 0.513205, acc 0.71875
2018-09-12T15:50:46.623735: step 752, loss 0.513213, acc 0.71875
2018-09-12T15:50:47.034759: step 753, loss 0.567142, acc 0.71875
2018-09-12T15:50:47.433781: step 754, loss 0.444057, acc 0.78125
2018-09-12T15:50:47.904808: step 755, loss 0.401214, acc 0.78125
2018-09-12T15:50:48.315832: step 756, loss 0.477321, acc 0.703125
2018-09-12T15:50:48.820861: step 757, loss 0.384674, acc 0.828125
2018-09-12T15:50:49.301888: step 758, loss 0.36209, acc 0.828125
2018-09-12T15:50:49.741913: step 759, loss 0.513339, acc 0.71875
2018-09-12T15:50:50.190939: step 760, loss 0.500554, acc 0.71875
2018-09-12T15:50:50.659966: step 761, loss 0.447796, acc 0.796875
2018-09-12T15:50:51.181996: step 762, loss 0.426248, acc 0.796875
2018-09-12T15:50:51.615021: step 763, loss 0.369092, acc 0.859375
2018-09-12T15:50:52.086047: step 764, loss 0.404465, acc 0.796875
2018-09-12T15:50:52.633079: step 765, loss 0.452337, acc 0.703125
2018-09-12T15:50:53.099105: step 766, loss 0.453532, acc 0.84375
2018-09-12T15:50:53.606134: step 767, loss 0.460574, acc 0.765625
2018-09-12T15:50:54.078161: step 768, loss 0.429507, acc 0.78125
2018-09-12T15:50:54.573190: step 769, loss 0.371847, acc 0.859375
2018-09-12T15:50:55.011215: step 770, loss 0.460975, acc 0.78125
2018-09-12T15:50:55.479242: step 771, loss 0.387424, acc 0.828125
2018-09-12T15:50:55.952269: step 772, loss 0.326478, acc 0.890625
2018-09-12T15:50:56.421295: step 773, loss 0.49398, acc 0.75
2018-09-12T15:50:56.860321: step 774, loss 0.33283, acc 0.859375
2018-09-12T15:50:57.352349: step 775, loss 0.363562, acc 0.828125
2018-09-12T15:50:57.795374: step 776, loss 0.488477, acc 0.734375
2018-09-12T15:50:58.253400: step 777, loss 0.521091, acc 0.6875
2018-09-12T15:50:58.743428: step 778, loss 0.44061, acc 0.78125
2018-09-12T15:50:59.199454: step 779, loss 0.322704, acc 0.859375
2018-09-12T15:50:59.691482: step 780, loss 0.371354, acc 0.84375
2018-09-12T15:51:00.187511: step 781, loss 0.484025, acc 0.75
2018-09-12T15:51:00.706541: step 782, loss 0.437831, acc 0.8125
2018-09-12T15:51:01.120564: step 783, loss 0.452503, acc 0.765625
2018-09-12T15:51:01.576590: step 784, loss 0.418095, acc 0.8125
2018-09-12T15:51:01.983614: step 785, loss 0.397194, acc 0.8125
2018-09-12T15:51:02.445640: step 786, loss 0.50009, acc 0.78125
2018-09-12T15:51:02.930668: step 787, loss 0.417752, acc 0.78125
2018-09-12T15:51:03.357692: step 788, loss 0.607277, acc 0.703125
2018-09-12T15:51:03.813718: step 789, loss 0.577793, acc 0.71875
2018-09-12T15:51:04.282745: step 790, loss 0.423669, acc 0.796875
2018-09-12T15:51:04.732771: step 791, loss 0.480953, acc 0.8125
2018-09-12T15:51:05.162795: step 792, loss 0.40844, acc 0.765625
2018-09-12T15:51:05.621822: step 793, loss 0.416724, acc 0.78125
2018-09-12T15:51:06.049846: step 794, loss 0.362016, acc 0.875
2018-09-12T15:51:06.603878: step 795, loss 0.444904, acc 0.765625
2018-09-12T15:51:07.111105: step 796, loss 0.468484, acc 0.8125
2018-09-12T15:51:07.649334: step 797, loss 0.373556, acc 0.875
2018-09-12T15:51:08.131362: step 798, loss 0.566778, acc 0.734375
2018-09-12T15:51:08.569387: step 799, loss 0.534809, acc 0.8125
2018-09-12T15:51:09.002412: step 800, loss 0.433771, acc 0.8125

Evaluation:
2018-09-12T15:51:10.072072: step 800, loss 0.590596, acc 0.681989

Saved model checkpoint to ./-800

2018-09-12T15:51:11.239139: step 801, loss 0.496362, acc 0.78125
2018-09-12T15:51:11.555157: step 802, loss 0.600716, acc 0.703125
2018-09-12T15:51:11.918177: step 803, loss 0.369438, acc 0.828125
2018-09-12T15:51:12.375204: step 804, loss 0.357195, acc 0.890625
2018-09-12T15:51:12.796228: step 805, loss 0.527297, acc 0.703125
2018-09-12T15:51:13.191250: step 806, loss 0.516765, acc 0.8125
2018-09-12T15:51:13.575272: step 807, loss 0.367156, acc 0.875
2018-09-12T15:51:13.972295: step 808, loss 0.505606, acc 0.765625
2018-09-12T15:51:14.353317: step 809, loss 0.450298, acc 0.75
2018-09-12T15:51:14.746339: step 810, loss 0.444168, acc 0.796875
2018-09-12T15:51:15.130361: step 811, loss 0.416803, acc 0.828125
2018-09-12T15:51:15.507383: step 812, loss 0.507476, acc 0.78125
2018-09-12T15:51:15.901405: step 813, loss 0.358727, acc 0.84375
2018-09-12T15:51:16.284427: step 814, loss 0.466259, acc 0.8125
2018-09-12T15:51:16.783456: step 815, loss 0.523701, acc 0.71875
2018-09-12T15:51:17.184479: step 816, loss 0.491728, acc 0.765625
2018-09-12T15:51:17.590502: step 817, loss 0.40388, acc 0.875
2018-09-12T15:51:17.978524: step 818, loss 0.485152, acc 0.75
2018-09-12T15:51:18.365546: step 819, loss 0.397352, acc 0.828125
2018-09-12T15:51:18.777570: step 820, loss 0.511511, acc 0.703125
2018-09-12T15:51:19.158592: step 821, loss 0.54739, acc 0.75
2018-09-12T15:51:19.552213: step 822, loss 0.431466, acc 0.75
2018-09-12T15:51:19.932235: step 823, loss 0.331222, acc 0.8125
2018-09-12T15:51:20.320257: step 824, loss 0.412159, acc 0.796875
2018-09-12T15:51:20.708279: step 825, loss 0.481546, acc 0.75
2018-09-12T15:51:21.097302: step 826, loss 0.359133, acc 0.84375
2018-09-12T15:51:21.472323: step 827, loss 0.448001, acc 0.765625
2018-09-12T15:51:21.864345: step 828, loss 0.536559, acc 0.71875
2018-09-12T15:51:22.253368: step 829, loss 0.294548, acc 0.90625
2018-09-12T15:51:22.644390: step 830, loss 0.348384, acc 0.875
2018-09-12T15:51:23.027412: step 831, loss 0.43599, acc 0.765625
2018-09-12T15:51:23.410434: step 832, loss 0.406463, acc 0.828125
2018-09-12T15:51:23.796456: step 833, loss 0.407528, acc 0.796875
2018-09-12T15:51:24.187478: step 834, loss 0.385224, acc 0.859375
2018-09-12T15:51:24.602502: step 835, loss 0.460719, acc 0.78125
2018-09-12T15:51:25.008525: step 836, loss 0.472972, acc 0.78125
2018-09-12T15:51:25.427549: step 837, loss 0.308279, acc 0.921875
2018-09-12T15:51:25.856574: step 838, loss 0.526328, acc 0.765625
2018-09-12T15:51:26.287598: step 839, loss 0.491198, acc 0.734375
2018-09-12T15:51:26.726624: step 840, loss 0.501571, acc 0.734375
2018-09-12T15:51:27.155648: step 841, loss 0.506254, acc 0.765625
2018-09-12T15:51:27.588673: step 842, loss 0.556295, acc 0.703125
2018-09-12T15:51:28.001697: step 843, loss 0.337003, acc 0.859375
2018-09-12T15:51:28.393719: step 844, loss 0.278044, acc 0.890625
2018-09-12T15:51:28.770741: step 845, loss 0.381765, acc 0.8125
2018-09-12T15:51:29.165362: step 846, loss 0.403395, acc 0.78125
2018-09-12T15:51:29.554984: step 847, loss 0.41302, acc 0.796875
2018-09-12T15:51:29.940006: step 848, loss 0.387151, acc 0.796875
2018-09-12T15:51:30.332028: step 849, loss 0.348165, acc 0.84375
2018-09-12T15:51:30.709050: step 850, loss 0.493675, acc 0.796875
2018-09-12T15:51:31.100072: step 851, loss 0.377261, acc 0.828125
2018-09-12T15:51:31.476094: step 852, loss 0.317784, acc 0.859375
2018-09-12T15:51:31.868116: step 853, loss 0.593084, acc 0.734375
2018-09-12T15:51:32.258138: step 854, loss 0.491835, acc 0.75
2018-09-12T15:51:32.652161: step 855, loss 0.507082, acc 0.75
2018-09-12T15:51:33.033183: step 856, loss 0.528389, acc 0.75
2018-09-12T15:51:33.420205: step 857, loss 0.388359, acc 0.78125
2018-09-12T15:51:33.810227: step 858, loss 0.480612, acc 0.78125
2018-09-12T15:51:34.194249: step 859, loss 0.572446, acc 0.625
2018-09-12T15:51:34.574271: step 860, loss 0.320196, acc 0.890625
2018-09-12T15:51:34.968293: step 861, loss 0.401198, acc 0.796875
2018-09-12T15:51:35.360316: step 862, loss 0.390427, acc 0.84375
2018-09-12T15:51:35.737337: step 863, loss 0.430616, acc 0.765625
2018-09-12T15:51:36.159361: step 864, loss 0.493598, acc 0.765625
2018-09-12T15:51:36.548384: step 865, loss 0.502298, acc 0.75
2018-09-12T15:51:36.937406: step 866, loss 0.499346, acc 0.765625
2018-09-12T15:51:37.333429: step 867, loss 0.420533, acc 0.828125
2018-09-12T15:51:37.727451: step 868, loss 0.31927, acc 0.859375
2018-09-12T15:51:38.173477: step 869, loss 0.371167, acc 0.828125
2018-09-12T15:51:38.586500: step 870, loss 0.396298, acc 0.8125
2018-09-12T15:51:39.021124: step 871, loss 0.452364, acc 0.796875
2018-09-12T15:51:39.447748: step 872, loss 0.538586, acc 0.734375
2018-09-12T15:51:39.864371: step 873, loss 0.47572, acc 0.796875
2018-09-12T15:51:40.305396: step 874, loss 0.386571, acc 0.828125
2018-09-12T15:51:40.731420: step 875, loss 0.380309, acc 0.84375
2018-09-12T15:51:41.161445: step 876, loss 0.408389, acc 0.828125
2018-09-12T15:51:41.543467: step 877, loss 0.36577, acc 0.8125
2018-09-12T15:51:41.935489: step 878, loss 0.479606, acc 0.75
2018-09-12T15:51:42.324511: step 879, loss 0.578482, acc 0.71875
2018-09-12T15:51:42.719534: step 880, loss 0.454305, acc 0.8125
2018-09-12T15:51:43.111556: step 881, loss 0.621927, acc 0.640625
2018-09-12T15:51:43.495578: step 882, loss 0.440449, acc 0.78125
2018-09-12T15:51:43.873600: step 883, loss 0.373834, acc 0.796875
2018-09-12T15:51:44.265622: step 884, loss 0.43255, acc 0.75
2018-09-12T15:51:44.644644: step 885, loss 0.512888, acc 0.71875
2018-09-12T15:51:45.032666: step 886, loss 0.439248, acc 0.78125
2018-09-12T15:51:45.418688: step 887, loss 0.455788, acc 0.828125
2018-09-12T15:51:45.811711: step 888, loss 0.560851, acc 0.71875
2018-09-12T15:51:46.196733: step 889, loss 0.426112, acc 0.8125
2018-09-12T15:51:46.585755: step 890, loss 0.350988, acc 0.859375
2018-09-12T15:51:46.975777: step 891, loss 0.471718, acc 0.75
2018-09-12T15:51:47.366800: step 892, loss 0.478262, acc 0.71875
2018-09-12T15:51:47.755822: step 893, loss 0.507784, acc 0.71875
2018-09-12T15:51:48.201447: step 894, loss 0.455218, acc 0.796875
2018-09-12T15:51:48.590469: step 895, loss 0.375361, acc 0.84375
2018-09-12T15:51:48.981090: step 896, loss 0.411509, acc 0.828125
2018-09-12T15:51:49.372311: step 897, loss 0.532663, acc 0.703125
2018-09-12T15:51:49.764334: step 898, loss 0.408805, acc 0.765625
2018-09-12T15:51:50.162356: step 899, loss 0.468353, acc 0.78125
2018-09-12T15:51:50.537378: step 900, loss 0.554983, acc 0.616667

Evaluation:
2018-09-12T15:51:51.558436: step 900, loss 0.578714, acc 0.701689

Saved model checkpoint to ./-900

2018-09-12T15:51:52.772506: step 901, loss 0.349764, acc 0.890625
2018-09-12T15:51:53.085523: step 902, loss 0.285425, acc 0.875
2018-09-12T15:51:53.431742: step 903, loss 0.330979, acc 0.875
2018-09-12T15:51:53.779761: step 904, loss 0.298344, acc 0.84375
2018-09-12T15:51:54.248788: step 905, loss 0.403504, acc 0.8125
2018-09-12T15:51:54.611809: step 906, loss 0.300631, acc 0.875
2018-09-12T15:51:54.995831: step 907, loss 0.477056, acc 0.796875
2018-09-12T15:51:55.405854: step 908, loss 0.376943, acc 0.84375
2018-09-12T15:51:55.852880: step 909, loss 0.331483, acc 0.828125
2018-09-12T15:51:56.334908: step 910, loss 0.369393, acc 0.875
2018-09-12T15:51:56.786933: step 911, loss 0.443275, acc 0.796875
2018-09-12T15:51:57.222958: step 912, loss 0.432232, acc 0.78125
2018-09-12T15:51:57.608980: step 913, loss 0.381792, acc 0.859375
2018-09-12T15:51:58.017004: step 914, loss 0.426084, acc 0.796875
2018-09-12T15:51:58.442028: step 915, loss 0.469742, acc 0.828125
2018-09-12T15:51:58.854052: step 916, loss 0.300487, acc 0.890625
2018-09-12T15:51:59.241272: step 917, loss 0.448634, acc 0.8125
2018-09-12T15:51:59.639295: step 918, loss 0.475938, acc 0.71875
2018-09-12T15:52:00.038318: step 919, loss 0.395361, acc 0.828125
2018-09-12T15:52:00.439341: step 920, loss 0.442151, acc 0.8125
2018-09-12T15:52:00.839363: step 921, loss 0.361258, acc 0.828125
2018-09-12T15:52:01.244387: step 922, loss 0.347253, acc 0.84375
2018-09-12T15:52:01.626409: step 923, loss 0.378033, acc 0.84375
2018-09-12T15:52:02.020431: step 924, loss 0.326172, acc 0.859375
2018-09-12T15:52:02.419454: step 925, loss 0.230978, acc 0.921875
2018-09-12T15:52:02.807476: step 926, loss 0.423588, acc 0.78125
2018-09-12T15:52:03.204499: step 927, loss 0.463295, acc 0.78125
2018-09-12T15:52:03.587521: step 928, loss 0.34006, acc 0.875
2018-09-12T15:52:03.980543: step 929, loss 0.36678, acc 0.84375
2018-09-12T15:52:04.364565: step 930, loss 0.372205, acc 0.828125
2018-09-12T15:52:04.759588: step 931, loss 0.46297, acc 0.78125
2018-09-12T15:52:05.139609: step 932, loss 0.345953, acc 0.84375
2018-09-12T15:52:05.532632: step 933, loss 0.300435, acc 0.890625
2018-09-12T15:52:05.926654: step 934, loss 0.345086, acc 0.828125
2018-09-12T15:52:06.309676: step 935, loss 0.443907, acc 0.78125
2018-09-12T15:52:06.687698: step 936, loss 0.393581, acc 0.796875
2018-09-12T15:52:07.084721: step 937, loss 0.29174, acc 0.890625
2018-09-12T15:52:07.477743: step 938, loss 0.436098, acc 0.8125
2018-09-12T15:52:07.876766: step 939, loss 0.283125, acc 0.90625
2018-09-12T15:52:08.272789: step 940, loss 0.47422, acc 0.734375
2018-09-12T15:52:08.686812: step 941, loss 0.301054, acc 0.875
2018-09-12T15:52:09.159839: step 942, loss 0.522806, acc 0.765625
2018-09-12T15:52:09.619465: step 943, loss 0.248125, acc 0.953125
2018-09-12T15:52:10.091492: step 944, loss 0.263762, acc 0.890625
2018-09-12T15:52:10.620522: step 945, loss 0.289397, acc 0.875
2018-09-12T15:52:11.052547: step 946, loss 0.471241, acc 0.765625
2018-09-12T15:52:11.499572: step 947, loss 0.307516, acc 0.84375
2018-09-12T15:52:11.950598: step 948, loss 0.35104, acc 0.828125
2018-09-12T15:52:12.402624: step 949, loss 0.323461, acc 0.859375
2018-09-12T15:52:12.854650: step 950, loss 0.348492, acc 0.828125
2018-09-12T15:52:13.275674: step 951, loss 0.418074, acc 0.765625
2018-09-12T15:52:13.671697: step 952, loss 0.250371, acc 0.9375
2018-09-12T15:52:14.083720: step 953, loss 0.476967, acc 0.75
2018-09-12T15:52:14.547747: step 954, loss 0.284515, acc 0.890625
2018-09-12T15:52:14.954770: step 955, loss 0.461495, acc 0.8125
2018-09-12T15:52:15.421797: step 956, loss 0.538807, acc 0.765625
2018-09-12T15:52:15.867822: step 957, loss 0.523724, acc 0.75
2018-09-12T15:52:16.302847: step 958, loss 0.316058, acc 0.859375
2018-09-12T15:52:16.779473: step 959, loss 0.489252, acc 0.71875
2018-09-12T15:52:17.238500: step 960, loss 0.381945, acc 0.875
2018-09-12T15:52:17.695526: step 961, loss 0.293396, acc 0.90625
2018-09-12T15:52:18.207555: step 962, loss 0.329315, acc 0.859375
2018-09-12T15:52:18.657581: step 963, loss 0.399074, acc 0.796875
2018-09-12T15:52:19.135608: step 964, loss 0.331889, acc 0.84375
2018-09-12T15:52:19.586634: step 965, loss 0.444859, acc 0.75
2018-09-12T15:52:20.053661: step 966, loss 0.426544, acc 0.78125
2018-09-12T15:52:20.534688: step 967, loss 0.283158, acc 0.90625
2018-09-12T15:52:21.029717: step 968, loss 0.397294, acc 0.78125
2018-09-12T15:52:21.511744: step 969, loss 0.475163, acc 0.78125
2018-09-12T15:52:21.976771: step 970, loss 0.413081, acc 0.78125
2018-09-12T15:52:22.457798: step 971, loss 0.360048, acc 0.859375
2018-09-12T15:52:22.918825: step 972, loss 0.352309, acc 0.828125
2018-09-12T15:52:23.373851: step 973, loss 0.315365, acc 0.859375
2018-09-12T15:52:23.859878: step 974, loss 0.369392, acc 0.859375
2018-09-12T15:52:24.276902: step 975, loss 0.364303, acc 0.84375
2018-09-12T15:52:24.785931: step 976, loss 0.401921, acc 0.828125
2018-09-12T15:52:25.275959: step 977, loss 0.343699, acc 0.859375
2018-09-12T15:52:25.774988: step 978, loss 0.325955, acc 0.84375
2018-09-12T15:52:26.179011: step 979, loss 0.39904, acc 0.796875
2018-09-12T15:52:26.619036: step 980, loss 0.315753, acc 0.890625
2018-09-12T15:52:27.054061: step 981, loss 0.401563, acc 0.8125
2018-09-12T15:52:27.477085: step 982, loss 0.441392, acc 0.75
2018-09-12T15:52:27.921111: step 983, loss 0.424159, acc 0.828125
2018-09-12T15:52:28.342135: step 984, loss 0.393886, acc 0.8125
2018-09-12T15:52:28.780160: step 985, loss 0.386288, acc 0.796875
2018-09-12T15:52:29.250786: step 986, loss 0.307349, acc 0.875
2018-09-12T15:52:29.659809: step 987, loss 0.500597, acc 0.765625
2018-09-12T15:52:30.053832: step 988, loss 0.413778, acc 0.78125
2018-09-12T15:52:30.447854: step 989, loss 0.256005, acc 0.90625
2018-09-12T15:52:30.858878: step 990, loss 0.358645, acc 0.796875
2018-09-12T15:52:31.253901: step 991, loss 0.312307, acc 0.875
2018-09-12T15:52:31.667924: step 992, loss 0.45094, acc 0.78125
2018-09-12T15:52:32.059947: step 993, loss 0.40843, acc 0.8125
2018-09-12T15:52:32.445969: step 994, loss 0.356725, acc 0.828125
2018-09-12T15:52:32.842991: step 995, loss 0.451005, acc 0.796875
2018-09-12T15:52:33.224013: step 996, loss 0.400945, acc 0.828125
2018-09-12T15:52:33.623036: step 997, loss 0.291401, acc 0.890625
2018-09-12T15:52:34.001058: step 998, loss 0.380988, acc 0.84375
2018-09-12T15:52:34.400080: step 999, loss 0.447444, acc 0.78125
2018-09-12T15:52:34.801103: step 1000, loss 0.435845, acc 0.828125

Evaluation:
2018-09-12T15:52:35.809161: step 1000, loss 0.604398, acc 0.67636

Saved model checkpoint to ./-1000

2018-09-12T15:52:36.899223: step 1001, loss 0.464104, acc 0.8125
2018-09-12T15:52:37.212241: step 1002, loss 0.394153, acc 0.828125
2018-09-12T15:52:37.563261: step 1003, loss 0.382579, acc 0.828125
2018-09-12T15:52:37.922282: step 1004, loss 0.545839, acc 0.796875
2018-09-12T15:52:38.416310: step 1005, loss 0.38943, acc 0.828125
2018-09-12T15:52:38.834334: step 1006, loss 0.312788, acc 0.875
2018-09-12T15:52:39.229956: step 1007, loss 0.512442, acc 0.78125
2018-09-12T15:52:39.659980: step 1008, loss 0.321991, acc 0.84375
2018-09-12T15:52:40.057003: step 1009, loss 0.398419, acc 0.84375
2018-09-12T15:52:40.444025: step 1010, loss 0.299158, acc 0.875
2018-09-12T15:52:40.860049: step 1011, loss 0.426738, acc 0.8125
2018-09-12T15:52:41.252072: step 1012, loss 0.310094, acc 0.90625
2018-09-12T15:52:41.638094: step 1013, loss 0.359859, acc 0.828125
2018-09-12T15:52:42.030116: step 1014, loss 0.29947, acc 0.84375
2018-09-12T15:52:42.439139: step 1015, loss 0.334481, acc 0.890625
2018-09-12T15:52:42.891165: step 1016, loss 0.383099, acc 0.875
2018-09-12T15:52:43.315190: step 1017, loss 0.383513, acc 0.828125
2018-09-12T15:52:43.772216: step 1018, loss 0.432775, acc 0.796875
2018-09-12T15:52:44.213241: step 1019, loss 0.2691, acc 0.875
2018-09-12T15:52:44.650266: step 1020, loss 0.439293, acc 0.78125
2018-09-12T15:52:45.086291: step 1021, loss 0.302498, acc 0.890625
2018-09-12T15:52:45.487314: step 1022, loss 0.451468, acc 0.828125
2018-09-12T15:52:45.890337: step 1023, loss 0.377135, acc 0.875
2018-09-12T15:52:46.311361: step 1024, loss 0.468561, acc 0.75
2018-09-12T15:52:46.725385: step 1025, loss 0.415281, acc 0.8125
2018-09-12T15:52:47.134408: step 1026, loss 0.370368, acc 0.8125
2018-09-12T15:52:47.524430: step 1027, loss 0.356665, acc 0.828125
2018-09-12T15:52:47.924453: step 1028, loss 0.363349, acc 0.78125
2018-09-12T15:52:48.329476: step 1029, loss 0.449086, acc 0.796875
2018-09-12T15:52:48.730499: step 1030, loss 0.361326, acc 0.8125
2018-09-12T15:52:49.140523: step 1031, loss 0.50806, acc 0.75
2018-09-12T15:52:49.540145: step 1032, loss 0.489049, acc 0.796875
2018-09-12T15:52:49.971169: step 1033, loss 0.351652, acc 0.828125
2018-09-12T15:52:50.361192: step 1034, loss 0.323414, acc 0.890625
2018-09-12T15:52:50.765215: step 1035, loss 0.32481, acc 0.859375
2018-09-12T15:52:51.155435: step 1036, loss 0.32316, acc 0.84375
2018-09-12T15:52:51.562459: step 1037, loss 0.419009, acc 0.78125
2018-09-12T15:52:51.948481: step 1038, loss 0.379036, acc 0.859375
2018-09-12T15:52:52.334503: step 1039, loss 0.370485, acc 0.875
2018-09-12T15:52:52.741526: step 1040, loss 0.389685, acc 0.796875
2018-09-12T15:52:53.139549: step 1041, loss 0.309401, acc 0.859375
2018-09-12T15:52:53.534571: step 1042, loss 0.279707, acc 0.828125
2018-09-12T15:52:53.949595: step 1043, loss 0.251609, acc 0.890625
2018-09-12T15:52:54.350618: step 1044, loss 0.463436, acc 0.78125
2018-09-12T15:52:54.742641: step 1045, loss 0.577324, acc 0.703125
2018-09-12T15:52:55.147664: step 1046, loss 0.415554, acc 0.828125
2018-09-12T15:52:55.534686: step 1047, loss 0.547842, acc 0.765625
2018-09-12T15:52:55.928708: step 1048, loss 0.263293, acc 0.90625
2018-09-12T15:52:56.317731: step 1049, loss 0.319912, acc 0.875
2018-09-12T15:52:56.695752: step 1050, loss 0.331282, acc 0.883333
2018-09-12T15:52:57.081774: step 1051, loss 0.27172, acc 0.875
2018-09-12T15:52:57.472797: step 1052, loss 0.35246, acc 0.8125
2018-09-12T15:52:57.860819: step 1053, loss 0.349913, acc 0.859375
2018-09-12T15:52:58.264842: step 1054, loss 0.322377, acc 0.8125
2018-09-12T15:52:58.702867: step 1055, loss 0.311654, acc 0.859375
2018-09-12T15:52:59.193895: step 1056, loss 0.297015, acc 0.84375
2018-09-12T15:52:59.612518: step 1057, loss 0.216545, acc 0.921875
2018-09-12T15:53:00.152549: step 1058, loss 0.363813, acc 0.859375
2018-09-12T15:53:00.620576: step 1059, loss 0.287419, acc 0.859375
2018-09-12T15:53:01.089603: step 1060, loss 0.381488, acc 0.796875
2018-09-12T15:53:01.494626: step 1061, loss 0.300692, acc 0.875
2018-09-12T15:53:01.886648: step 1062, loss 0.323645, acc 0.859375
2018-09-12T15:53:02.283671: step 1063, loss 0.327042, acc 0.90625
2018-09-12T15:53:02.675693: step 1064, loss 0.282471, acc 0.890625
2018-09-12T15:53:03.065716: step 1065, loss 0.242444, acc 0.875
2018-09-12T15:53:03.464739: step 1066, loss 0.258523, acc 0.890625
2018-09-12T15:53:03.877762: step 1067, loss 0.263271, acc 0.890625
2018-09-12T15:53:04.286786: step 1068, loss 0.264441, acc 0.90625
2018-09-12T15:53:04.700809: step 1069, loss 0.32949, acc 0.875
2018-09-12T15:53:05.104832: step 1070, loss 0.394768, acc 0.828125
2018-09-12T15:53:05.498053: step 1071, loss 0.282168, acc 0.90625
2018-09-12T15:53:05.900076: step 1072, loss 0.413112, acc 0.78125
2018-09-12T15:53:06.322100: step 1073, loss 0.342675, acc 0.84375
2018-09-12T15:53:06.716123: step 1074, loss 0.29909, acc 0.828125
2018-09-12T15:53:07.124745: step 1075, loss 0.226095, acc 0.921875
2018-09-12T15:53:07.511767: step 1076, loss 0.32062, acc 0.859375
2018-09-12T15:53:07.911790: step 1077, loss 0.261495, acc 0.90625
2018-09-12T15:53:08.298812: step 1078, loss 0.386071, acc 0.828125
2018-09-12T15:53:08.697835: step 1079, loss 0.323926, acc 0.875
2018-09-12T15:53:09.088858: step 1080, loss 0.273019, acc 0.859375
2018-09-12T15:53:09.505881: step 1081, loss 0.253574, acc 0.875
2018-09-12T15:53:09.930906: step 1082, loss 0.212266, acc 0.921875
2018-09-12T15:53:10.479937: step 1083, loss 0.283333, acc 0.90625
2018-09-12T15:53:10.941964: step 1084, loss 0.312966, acc 0.875
2018-09-12T15:53:11.450993: step 1085, loss 0.429298, acc 0.8125
2018-09-12T15:53:11.919020: step 1086, loss 0.434732, acc 0.78125
2018-09-12T15:53:12.364045: step 1087, loss 0.450293, acc 0.84375
2018-09-12T15:53:12.799070: step 1088, loss 0.286371, acc 0.921875
2018-09-12T15:53:13.212093: step 1089, loss 0.284652, acc 0.90625
2018-09-12T15:53:13.627117: step 1090, loss 0.220558, acc 0.90625
2018-09-12T15:53:14.074143: step 1091, loss 0.478222, acc 0.71875
2018-09-12T15:53:14.571171: step 1092, loss 0.446155, acc 0.796875
2018-09-12T15:53:15.123203: step 1093, loss 0.196967, acc 0.984375
2018-09-12T15:53:15.656233: step 1094, loss 0.201522, acc 0.90625
2018-09-12T15:53:16.156262: step 1095, loss 0.31065, acc 0.859375
2018-09-12T15:53:16.704293: step 1096, loss 0.268283, acc 0.875
2018-09-12T15:53:17.219323: step 1097, loss 0.302594, acc 0.859375
2018-09-12T15:53:17.847359: step 1098, loss 0.294617, acc 0.875
2018-09-12T15:53:18.341387: step 1099, loss 0.196717, acc 0.9375
2018-09-12T15:53:18.897419: step 1100, loss 0.325475, acc 0.859375

Evaluation:
2018-09-12T15:53:20.271497: step 1100, loss 0.576764, acc 0.72045

Saved model checkpoint to ./-1100

2018-09-12T15:53:23.113660: step 1101, loss 0.290182, acc 0.859375
2018-09-12T15:53:23.455679: step 1102, loss 0.238266, acc 0.890625
2018-09-12T15:53:23.852702: step 1103, loss 0.368279, acc 0.828125
2018-09-12T15:53:24.282727: step 1104, loss 0.343859, acc 0.84375
2018-09-12T15:53:24.724752: step 1105, loss 0.231829, acc 0.890625
2018-09-12T15:53:25.253782: step 1106, loss 0.236445, acc 0.90625
2018-09-12T15:53:25.657805: step 1107, loss 0.339031, acc 0.8125
2018-09-12T15:53:26.068829: step 1108, loss 0.434776, acc 0.78125
2018-09-12T15:53:26.460851: step 1109, loss 0.17814, acc 0.953125
2018-09-12T15:53:26.856874: step 1110, loss 0.44349, acc 0.828125
2018-09-12T15:53:27.266897: step 1111, loss 0.22138, acc 0.9375
2018-09-12T15:53:27.735924: step 1112, loss 0.307308, acc 0.875
2018-09-12T15:53:28.192950: step 1113, loss 0.425785, acc 0.828125
2018-09-12T15:53:28.687979: step 1114, loss 0.275199, acc 0.921875
2018-09-12T15:53:29.123004: step 1115, loss 0.408978, acc 0.828125
2018-09-12T15:53:29.546028: step 1116, loss 0.314889, acc 0.890625
2018-09-12T15:53:29.952051: step 1117, loss 0.413429, acc 0.78125
2018-09-12T15:53:30.413077: step 1118, loss 0.293354, acc 0.875
2018-09-12T15:53:30.856103: step 1119, loss 0.194536, acc 0.921875
2018-09-12T15:53:31.333130: step 1120, loss 0.326188, acc 0.859375
2018-09-12T15:53:31.810157: step 1121, loss 0.322497, acc 0.859375
2018-09-12T15:53:32.354188: step 1122, loss 0.206206, acc 0.9375
2018-09-12T15:53:32.760212: step 1123, loss 0.342543, acc 0.84375
2018-09-12T15:53:33.152234: step 1124, loss 0.247709, acc 0.90625
2018-09-12T15:53:33.544256: step 1125, loss 0.186593, acc 0.9375
2018-09-12T15:53:33.960280: step 1126, loss 0.316395, acc 0.828125
2018-09-12T15:53:34.424307: step 1127, loss 0.444661, acc 0.828125
2018-09-12T15:53:34.906334: step 1128, loss 0.285176, acc 0.859375
2018-09-12T15:53:35.400363: step 1129, loss 0.320604, acc 0.84375
2018-09-12T15:53:35.784385: step 1130, loss 0.272583, acc 0.890625
2018-09-12T15:53:36.218409: step 1131, loss 0.221375, acc 0.953125
2018-09-12T15:53:36.684436: step 1132, loss 0.443399, acc 0.8125
2018-09-12T15:53:37.209466: step 1133, loss 0.141685, acc 0.96875
2018-09-12T15:53:37.801500: step 1134, loss 0.289031, acc 0.8125
2018-09-12T15:53:38.475538: step 1135, loss 0.333715, acc 0.859375
2018-09-12T15:53:39.067572: step 1136, loss 0.263907, acc 0.875
2018-09-12T15:53:39.634605: step 1137, loss 0.206168, acc 0.9375
2018-09-12T15:53:40.194637: step 1138, loss 0.217809, acc 0.890625
2018-09-12T15:53:40.677664: step 1139, loss 0.200243, acc 0.921875
2018-09-12T15:53:41.188694: step 1140, loss 0.209409, acc 0.875
2018-09-12T15:53:41.673721: step 1141, loss 0.165077, acc 0.953125
2018-09-12T15:53:42.177750: step 1142, loss 0.363636, acc 0.84375
2018-09-12T15:53:42.690780: step 1143, loss 0.286691, acc 0.859375
2018-09-12T15:53:43.128805: step 1144, loss 0.356588, acc 0.84375
2018-09-12T15:53:43.618833: step 1145, loss 0.194105, acc 0.953125
2018-09-12T15:53:44.126862: step 1146, loss 0.28522, acc 0.90625
2018-09-12T15:53:44.559886: step 1147, loss 0.282316, acc 0.84375
2018-09-12T15:53:45.075916: step 1148, loss 0.33622, acc 0.875
2018-09-12T15:53:45.577945: step 1149, loss 0.339488, acc 0.8125
2018-09-12T15:53:46.082974: step 1150, loss 0.274268, acc 0.921875
2018-09-12T15:53:46.642006: step 1151, loss 0.186734, acc 0.921875
2018-09-12T15:53:47.209038: step 1152, loss 0.361063, acc 0.796875
2018-09-12T15:53:47.684065: step 1153, loss 0.322748, acc 0.828125
2018-09-12T15:53:48.156092: step 1154, loss 0.311168, acc 0.828125
2018-09-12T15:53:48.629119: step 1155, loss 0.46658, acc 0.859375
2018-09-12T15:53:49.068144: step 1156, loss 0.262284, acc 0.875
2018-09-12T15:53:49.517170: step 1157, loss 0.322837, acc 0.875
2018-09-12T15:53:49.934194: step 1158, loss 0.353567, acc 0.828125
2018-09-12T15:53:50.361218: step 1159, loss 0.333624, acc 0.890625
2018-09-12T15:53:50.765241: step 1160, loss 0.464331, acc 0.78125
2018-09-12T15:53:51.172265: step 1161, loss 0.326158, acc 0.890625
2018-09-12T15:53:51.571287: step 1162, loss 0.256822, acc 0.859375
2018-09-12T15:53:51.995312: step 1163, loss 0.25432, acc 0.875
2018-09-12T15:53:52.422336: step 1164, loss 0.464328, acc 0.8125
2018-09-12T15:53:52.837360: step 1165, loss 0.354514, acc 0.875
2018-09-12T15:53:53.230382: step 1166, loss 0.287025, acc 0.875
2018-09-12T15:53:53.638406: step 1167, loss 0.233835, acc 0.9375
2018-09-12T15:53:54.044429: step 1168, loss 0.276498, acc 0.875
2018-09-12T15:53:54.445452: step 1169, loss 0.199957, acc 0.9375
2018-09-12T15:53:54.862476: step 1170, loss 0.373386, acc 0.828125
2018-09-12T15:53:55.255498: step 1171, loss 0.378709, acc 0.828125
2018-09-12T15:53:55.655521: step 1172, loss 0.292309, acc 0.890625
2018-09-12T15:53:56.095546: step 1173, loss 0.280647, acc 0.875
2018-09-12T15:53:56.516570: step 1174, loss 0.249805, acc 0.890625
2018-09-12T15:53:56.926594: step 1175, loss 0.346571, acc 0.84375
2018-09-12T15:53:57.340617: step 1176, loss 0.184406, acc 0.953125
2018-09-12T15:53:57.747641: step 1177, loss 0.261999, acc 0.875
2018-09-12T15:53:58.153664: step 1178, loss 0.389753, acc 0.8125
2018-09-12T15:53:58.542686: step 1179, loss 0.412397, acc 0.796875
2018-09-12T15:53:58.930708: step 1180, loss 0.309848, acc 0.890625
2018-09-12T15:53:59.336731: step 1181, loss 0.248777, acc 0.890625
2018-09-12T15:53:59.735754: step 1182, loss 0.344272, acc 0.859375
2018-09-12T15:54:00.143777: step 1183, loss 0.303312, acc 0.90625
2018-09-12T15:54:00.551800: step 1184, loss 0.23615, acc 0.875
2018-09-12T15:54:00.944823: step 1185, loss 0.34139, acc 0.84375
2018-09-12T15:54:01.346846: step 1186, loss 0.201841, acc 0.921875
2018-09-12T15:54:01.728867: step 1187, loss 0.376613, acc 0.84375
2018-09-12T15:54:02.129890: step 1188, loss 0.339309, acc 0.875
2018-09-12T15:54:02.581916: step 1189, loss 0.374319, acc 0.875
2018-09-12T15:54:03.002940: step 1190, loss 0.277587, acc 0.859375
2018-09-12T15:54:03.424964: step 1191, loss 0.234376, acc 0.90625
2018-09-12T15:54:03.864988: step 1192, loss 0.3574, acc 0.859375
2018-09-12T15:54:04.305014: step 1193, loss 0.323424, acc 0.828125
2018-09-12T15:54:04.745039: step 1194, loss 0.418432, acc 0.8125
2018-09-12T15:54:05.191064: step 1195, loss 0.251631, acc 0.90625
2018-09-12T15:54:05.588087: step 1196, loss 0.407042, acc 0.796875
2018-09-12T15:54:05.994110: step 1197, loss 0.223304, acc 0.921875
2018-09-12T15:54:06.383132: step 1198, loss 0.358096, acc 0.875
2018-09-12T15:54:06.795156: step 1199, loss 0.274198, acc 0.875
2018-09-12T15:54:07.175178: step 1200, loss 0.345374, acc 0.8

Evaluation:
2018-09-12T15:54:08.203237: step 1200, loss 0.591004, acc 0.711069

Saved model checkpoint to ./-1200

2018-09-12T15:54:09.544313: step 1201, loss 0.294558, acc 0.859375
2018-09-12T15:54:09.931335: step 1202, loss 0.194401, acc 0.921875
2018-09-12T15:54:10.277355: step 1203, loss 0.175441, acc 0.9375
2018-09-12T15:54:10.722381: step 1204, loss 0.182799, acc 0.9375
2018-09-12T15:54:11.101402: step 1205, loss 0.306899, acc 0.875
2018-09-12T15:54:11.518426: step 1206, loss 0.167402, acc 0.9375
2018-09-12T15:54:11.940450: step 1207, loss 0.200974, acc 0.921875
2018-09-12T15:54:12.327472: step 1208, loss 0.31945, acc 0.859375
2018-09-12T15:54:12.738496: step 1209, loss 0.217839, acc 0.90625
2018-09-12T15:54:13.128518: step 1210, loss 0.210788, acc 0.921875
2018-09-12T15:54:13.537542: step 1211, loss 0.316556, acc 0.875
2018-09-12T15:54:13.952565: step 1212, loss 0.264615, acc 0.921875
2018-09-12T15:54:14.373589: step 1213, loss 0.300563, acc 0.828125
2018-09-12T15:54:14.795614: step 1214, loss 0.267777, acc 0.890625
2018-09-12T15:54:15.196637: step 1215, loss 0.206126, acc 0.921875
2018-09-12T15:54:15.600660: step 1216, loss 0.237837, acc 0.921875
2018-09-12T15:54:15.998682: step 1217, loss 0.274975, acc 0.859375
2018-09-12T15:54:16.391705: step 1218, loss 0.21668, acc 0.90625
2018-09-12T15:54:16.820729: step 1219, loss 0.192839, acc 0.90625
2018-09-12T15:54:17.218752: step 1220, loss 0.29878, acc 0.84375
2018-09-12T15:54:17.639776: step 1221, loss 0.254706, acc 0.890625
2018-09-12T15:54:18.042799: step 1222, loss 0.234094, acc 0.921875
2018-09-12T15:54:18.462823: step 1223, loss 0.262591, acc 0.890625
2018-09-12T15:54:18.911849: step 1224, loss 0.286853, acc 0.890625
2018-09-12T15:54:19.362875: step 1225, loss 0.309458, acc 0.84375
2018-09-12T15:54:19.842902: step 1226, loss 0.172075, acc 0.96875
2018-09-12T15:54:20.282927: step 1227, loss 0.23783, acc 0.875
2018-09-12T15:54:20.762955: step 1228, loss 0.185758, acc 0.9375
2018-09-12T15:54:21.222981: step 1229, loss 0.203315, acc 0.890625
2018-09-12T15:54:21.648006: step 1230, loss 0.226975, acc 0.890625
2018-09-12T15:54:22.053029: step 1231, loss 0.201222, acc 0.921875
2018-09-12T15:54:22.459052: step 1232, loss 0.184635, acc 0.9375
2018-09-12T15:54:22.906078: step 1233, loss 0.300232, acc 0.875
2018-09-12T15:54:23.306100: step 1234, loss 0.16637, acc 0.953125
2018-09-12T15:54:23.714124: step 1235, loss 0.247753, acc 0.875
2018-09-12T15:54:24.117147: step 1236, loss 0.191336, acc 0.921875
2018-09-12T15:54:24.518170: step 1237, loss 0.273055, acc 0.921875
2018-09-12T15:54:24.937194: step 1238, loss 0.319154, acc 0.859375
2018-09-12T15:54:25.351217: step 1239, loss 0.309807, acc 0.875
2018-09-12T15:54:25.746240: step 1240, loss 0.226888, acc 0.875
2018-09-12T15:54:26.182265: step 1241, loss 0.348722, acc 0.90625
2018-09-12T15:54:26.575287: step 1242, loss 0.215221, acc 0.90625
2018-09-12T15:54:26.990311: step 1243, loss 0.209489, acc 0.90625
2018-09-12T15:54:27.387334: step 1244, loss 0.235698, acc 0.90625
2018-09-12T15:54:27.838360: step 1245, loss 0.277802, acc 0.921875
2018-09-12T15:54:28.253383: step 1246, loss 0.416063, acc 0.84375
2018-09-12T15:54:28.664407: step 1247, loss 0.234169, acc 0.90625
2018-09-12T15:54:29.060430: step 1248, loss 0.338694, acc 0.875
2018-09-12T15:54:29.486454: step 1249, loss 0.284326, acc 0.890625
2018-09-12T15:54:29.933479: step 1250, loss 0.215855, acc 0.9375
2018-09-12T15:54:30.344503: step 1251, loss 0.281593, acc 0.859375
2018-09-12T15:54:30.752526: step 1252, loss 0.258632, acc 0.90625
2018-09-12T15:54:31.148549: step 1253, loss 0.20215, acc 0.921875
2018-09-12T15:54:31.561573: step 1254, loss 0.208867, acc 0.9375
2018-09-12T15:54:31.982597: step 1255, loss 0.139442, acc 0.953125
2018-09-12T15:54:32.378619: step 1256, loss 0.251878, acc 0.890625
2018-09-12T15:54:32.771642: step 1257, loss 0.318282, acc 0.859375
2018-09-12T15:54:33.188666: step 1258, loss 0.317173, acc 0.859375
2018-09-12T15:54:33.578688: step 1259, loss 0.28444, acc 0.859375
2018-09-12T15:54:33.990712: step 1260, loss 0.174011, acc 0.921875
2018-09-12T15:54:34.433737: step 1261, loss 0.178201, acc 0.921875
2018-09-12T15:54:34.940766: step 1262, loss 0.249123, acc 0.875
2018-09-12T15:54:35.390792: step 1263, loss 0.217991, acc 0.921875
2018-09-12T15:54:35.881820: step 1264, loss 0.267489, acc 0.859375
2018-09-12T15:54:36.360847: step 1265, loss 0.213798, acc 0.890625
2018-09-12T15:54:36.820873: step 1266, loss 0.345402, acc 0.890625
2018-09-12T15:54:37.268899: step 1267, loss 0.341463, acc 0.8125
2018-09-12T15:54:37.674922: step 1268, loss 0.256812, acc 0.890625
2018-09-12T15:54:38.076945: step 1269, loss 0.331194, acc 0.875
2018-09-12T15:54:38.485969: step 1270, loss 0.217207, acc 0.875
2018-09-12T15:54:38.897992: step 1271, loss 0.331777, acc 0.84375
2018-09-12T15:54:39.309016: step 1272, loss 0.251921, acc 0.84375
2018-09-12T15:54:39.727040: step 1273, loss 0.340759, acc 0.875
2018-09-12T15:54:40.133063: step 1274, loss 0.215728, acc 0.921875
2018-09-12T15:54:40.554087: step 1275, loss 0.200828, acc 0.9375
2018-09-12T15:54:40.964110: step 1276, loss 0.26295, acc 0.859375
2018-09-12T15:54:41.349132: step 1277, loss 0.185423, acc 0.953125
2018-09-12T15:54:41.746155: step 1278, loss 0.289595, acc 0.90625
2018-09-12T15:54:42.143178: step 1279, loss 0.234176, acc 0.9375
2018-09-12T15:54:42.536200: step 1280, loss 0.344187, acc 0.84375
2018-09-12T15:54:42.936223: step 1281, loss 0.334274, acc 0.875
2018-09-12T15:54:43.336246: step 1282, loss 0.285997, acc 0.859375
2018-09-12T15:54:43.723268: step 1283, loss 0.238857, acc 0.953125
2018-09-12T15:54:44.139292: step 1284, loss 0.140099, acc 0.953125
2018-09-12T15:54:44.581317: step 1285, loss 0.178643, acc 0.921875
2018-09-12T15:54:45.006342: step 1286, loss 0.262748, acc 0.875
2018-09-12T15:54:45.413365: step 1287, loss 0.245752, acc 0.875
2018-09-12T15:54:45.850390: step 1288, loss 0.179818, acc 0.921875
2018-09-12T15:54:46.255413: step 1289, loss 0.229311, acc 0.921875
2018-09-12T15:54:46.663436: step 1290, loss 0.278933, acc 0.890625
2018-09-12T15:54:47.064459: step 1291, loss 0.360874, acc 0.828125
2018-09-12T15:54:47.455482: step 1292, loss 0.222406, acc 0.921875
2018-09-12T15:54:47.854504: step 1293, loss 0.251466, acc 0.875
2018-09-12T15:54:48.246527: step 1294, loss 0.222045, acc 0.90625
2018-09-12T15:54:48.627549: step 1295, loss 0.275021, acc 0.875
2018-09-12T15:54:49.037572: step 1296, loss 0.299579, acc 0.875
2018-09-12T15:54:49.472597: step 1297, loss 0.258454, acc 0.890625
2018-09-12T15:54:49.917622: step 1298, loss 0.250853, acc 0.890625
2018-09-12T15:54:50.371648: step 1299, loss 0.219778, acc 0.921875
2018-09-12T15:54:50.846676: step 1300, loss 0.251089, acc 0.875

Evaluation:
2018-09-12T15:54:52.124749: step 1300, loss 0.576097, acc 0.726079

Saved model checkpoint to ./-1300

2018-09-12T15:54:55.155922: step 1301, loss 0.235592, acc 0.890625
2018-09-12T15:54:55.460940: step 1302, loss 0.265519, acc 0.875
2018-09-12T15:54:55.796959: step 1303, loss 0.249782, acc 0.90625
2018-09-12T15:54:56.139978: step 1304, loss 0.259772, acc 0.875
2018-09-12T15:54:56.524000: step 1305, loss 0.143115, acc 0.953125
2018-09-12T15:54:57.088033: step 1306, loss 0.307453, acc 0.875
2018-09-12T15:54:57.475055: step 1307, loss 0.260274, acc 0.859375
2018-09-12T15:54:57.918080: step 1308, loss 0.249431, acc 0.921875
2018-09-12T15:54:58.334104: step 1309, loss 0.352892, acc 0.828125
2018-09-12T15:54:58.765129: step 1310, loss 0.12347, acc 0.96875
2018-09-12T15:54:59.166151: step 1311, loss 0.346201, acc 0.828125
2018-09-12T15:54:59.546173: step 1312, loss 0.381634, acc 0.828125
2018-09-12T15:54:59.958197: step 1313, loss 0.212832, acc 0.953125
2018-09-12T15:55:00.392222: step 1314, loss 0.237794, acc 0.90625
2018-09-12T15:55:00.864249: step 1315, loss 0.194721, acc 0.921875
2018-09-12T15:55:01.319275: step 1316, loss 0.246614, acc 0.890625
2018-09-12T15:55:01.751299: step 1317, loss 0.163853, acc 0.921875
2018-09-12T15:55:02.226326: step 1318, loss 0.216025, acc 0.9375
2018-09-12T15:55:02.722355: step 1319, loss 0.400333, acc 0.859375
2018-09-12T15:55:03.178381: step 1320, loss 0.241677, acc 0.890625
2018-09-12T15:55:03.679410: step 1321, loss 0.220306, acc 0.890625
2018-09-12T15:55:04.167438: step 1322, loss 0.312899, acc 0.875
2018-09-12T15:55:04.659466: step 1323, loss 0.394633, acc 0.78125
2018-09-12T15:55:05.176495: step 1324, loss 0.277576, acc 0.890625
2018-09-12T15:55:05.636522: step 1325, loss 0.378757, acc 0.84375
2018-09-12T15:55:06.053545: step 1326, loss 0.270108, acc 0.921875
2018-09-12T15:55:06.508571: step 1327, loss 0.33258, acc 0.84375
2018-09-12T15:55:06.932596: step 1328, loss 0.14981, acc 0.921875
2018-09-12T15:55:07.369621: step 1329, loss 0.240716, acc 0.875
2018-09-12T15:55:07.811646: step 1330, loss 0.224141, acc 0.890625
2018-09-12T15:55:08.274672: step 1331, loss 0.352412, acc 0.828125
2018-09-12T15:55:08.727698: step 1332, loss 0.260661, acc 0.90625
2018-09-12T15:55:09.149722: step 1333, loss 0.180467, acc 0.921875
2018-09-12T15:55:09.581747: step 1334, loss 0.256803, acc 0.875
2018-09-12T15:55:10.070775: step 1335, loss 0.242063, acc 0.859375
2018-09-12T15:55:10.538802: step 1336, loss 0.180932, acc 0.90625
2018-09-12T15:55:11.104834: step 1337, loss 0.196863, acc 0.90625
2018-09-12T15:55:11.541859: step 1338, loss 0.258022, acc 0.90625
2018-09-12T15:55:11.945882: step 1339, loss 0.309827, acc 0.859375
2018-09-12T15:55:12.417909: step 1340, loss 0.206595, acc 0.90625
2018-09-12T15:55:12.980942: step 1341, loss 0.175659, acc 0.953125
2018-09-12T15:55:13.430967: step 1342, loss 0.204407, acc 0.890625
2018-09-12T15:55:13.857992: step 1343, loss 0.194374, acc 0.921875
2018-09-12T15:55:14.365021: step 1344, loss 0.261662, acc 0.875
2018-09-12T15:55:14.847048: step 1345, loss 0.217855, acc 0.875
2018-09-12T15:55:15.287074: step 1346, loss 0.145963, acc 0.9375
2018-09-12T15:55:15.722098: step 1347, loss 0.155808, acc 0.9375
2018-09-12T15:55:16.175124: step 1348, loss 0.338239, acc 0.84375
2018-09-12T15:55:16.613149: step 1349, loss 0.187925, acc 0.90625
2018-09-12T15:55:17.019173: step 1350, loss 0.255057, acc 0.9
2018-09-12T15:55:17.508201: step 1351, loss 0.268567, acc 0.90625
2018-09-12T15:55:17.950226: step 1352, loss 0.162003, acc 0.921875
2018-09-12T15:55:18.405252: step 1353, loss 0.149209, acc 0.953125
2018-09-12T15:55:18.830276: step 1354, loss 0.214269, acc 0.90625
2018-09-12T15:55:19.255300: step 1355, loss 0.267017, acc 0.890625
2018-09-12T15:55:19.724327: step 1356, loss 0.198573, acc 0.9375
2018-09-12T15:55:20.218356: step 1357, loss 0.277389, acc 0.890625
2018-09-12T15:55:20.681382: step 1358, loss 0.264886, acc 0.890625
2018-09-12T15:55:21.239414: step 1359, loss 0.112925, acc 0.96875
2018-09-12T15:55:21.713441: step 1360, loss 0.245555, acc 0.875
2018-09-12T15:55:22.171467: step 1361, loss 0.276615, acc 0.875
2018-09-12T15:55:22.648495: step 1362, loss 0.220128, acc 0.90625
2018-09-12T15:55:23.178525: step 1363, loss 0.128775, acc 0.953125
2018-09-12T15:55:23.679554: step 1364, loss 0.211239, acc 0.921875
2018-09-12T15:55:24.175582: step 1365, loss 0.116397, acc 0.96875
2018-09-12T15:55:24.666610: step 1366, loss 0.27237, acc 0.859375
2018-09-12T15:55:25.163638: step 1367, loss 0.173003, acc 0.921875
2018-09-12T15:55:25.652666: step 1368, loss 0.22043, acc 0.9375
2018-09-12T15:55:26.133694: step 1369, loss 0.189645, acc 0.90625
2018-09-12T15:55:26.531717: step 1370, loss 0.126567, acc 0.96875
2018-09-12T15:55:26.949741: step 1371, loss 0.176685, acc 0.9375
2018-09-12T15:55:27.353764: step 1372, loss 0.119875, acc 0.953125
2018-09-12T15:55:27.787789: step 1373, loss 0.30334, acc 0.84375
2018-09-12T15:55:28.206812: step 1374, loss 0.213394, acc 0.921875
2018-09-12T15:55:28.622836: step 1375, loss 0.177446, acc 0.90625
2018-09-12T15:55:29.022859: step 1376, loss 0.178802, acc 0.921875
2018-09-12T15:55:29.424882: step 1377, loss 0.176645, acc 0.9375
2018-09-12T15:55:29.818905: step 1378, loss 0.33609, acc 0.875
2018-09-12T15:55:30.206927: step 1379, loss 0.13367, acc 0.953125
2018-09-12T15:55:30.586949: step 1380, loss 0.249074, acc 0.9375
2018-09-12T15:55:30.974971: step 1381, loss 0.268702, acc 0.90625
2018-09-12T15:55:31.390995: step 1382, loss 0.20895, acc 0.90625
2018-09-12T15:55:31.816019: step 1383, loss 0.167188, acc 0.953125
2018-09-12T15:55:32.223042: step 1384, loss 0.2197, acc 0.90625
2018-09-12T15:55:32.685069: step 1385, loss 0.235567, acc 0.875
2018-09-12T15:55:33.068091: step 1386, loss 0.169827, acc 0.953125
2018-09-12T15:55:33.510116: step 1387, loss 0.239713, acc 0.890625
2018-09-12T15:55:34.016145: step 1388, loss 0.202181, acc 0.90625
2018-09-12T15:55:34.481171: step 1389, loss 0.11216, acc 0.96875
2018-09-12T15:55:35.107207: step 1390, loss 0.241367, acc 0.890625
2018-09-12T15:55:35.598235: step 1391, loss 0.124535, acc 0.953125
2018-09-12T15:55:36.092264: step 1392, loss 0.213339, acc 0.890625
2018-09-12T15:55:36.611293: step 1393, loss 0.159824, acc 0.921875
2018-09-12T15:55:37.129323: step 1394, loss 0.209672, acc 0.90625
2018-09-12T15:55:37.603350: step 1395, loss 0.168593, acc 0.9375
2018-09-12T15:55:38.067376: step 1396, loss 0.239558, acc 0.890625
2018-09-12T15:55:38.559405: step 1397, loss 0.174098, acc 0.921875
2018-09-12T15:55:39.042432: step 1398, loss 0.117099, acc 0.96875
2018-09-12T15:55:39.551461: step 1399, loss 0.288206, acc 0.890625
2018-09-12T15:55:39.957485: step 1400, loss 0.363872, acc 0.84375

Evaluation:
2018-09-12T15:55:41.215557: step 1400, loss 0.584827, acc 0.733584

Saved model checkpoint to ./-1400
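
Each "Saved model checkpoint" line above means the current graph and trained weights were written to disk by tf.train.Saver, so a separate script can restore them later for prediction. Below is a minimal restore sketch; it is an illustration rather than part of the original scripts, and the checkpoint path, the padded sequence length 56, and the tensor names input_x, dropout_keep_prob and output/predictions are assumptions based on the usual TextCNN graph definition:

import numpy as np
import tensorflow as tf

# Hypothetical checkpoint directory; train.py decides the actual run directory
checkpoint_file = tf.train.latest_checkpoint("./runs/checkpoints")

# Placeholder input: one all-padding sentence; the second dimension must equal
# the sequence length the model was trained with (assumed 56 here)
x_test = np.zeros((1, 56), dtype=np.int32)

graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()
    with sess.as_default():
        # Load the saved meta graph and restore the trained variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Look up the input placeholders and the prediction op by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
        predictions = graph.get_operation_by_name("output/predictions").outputs[0]

        # Dropout is disabled at inference time (keep probability 1.0)
        print(sess.run(predictions, {input_x: x_test, dropout_keep_prob: 1.0}))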

2018-09-12T15:55:44.940770: step 1401, loss 0.224452, acc 0.921875
2018-09-12T15:55:45.329792: step 1402, loss 0.270594, acc 0.90625
2018-09-12T15:55:45.783818: step 1403, loss 0.274521, acc 0.921875
2018-09-12T15:55:46.313848: step 1404, loss 0.238617, acc 0.90625
2018-09-12T15:55:46.855879: step 1405, loss 0.170041, acc 0.953125
2018-09-12T15:55:47.256902: step 1406, loss 0.137096, acc 0.96875
2018-09-12T15:55:47.654925: step 1407, loss 0.128096, acc 0.9375
2018-09-12T15:55:48.115951: step 1408, loss 0.147696, acc 0.9375
2018-09-12T15:55:48.513974: step 1409, loss 0.343775, acc 0.859375
2018-09-12T15:55:48.994001: step 1410, loss 0.226487, acc 0.90625
2018-09-12T15:55:49.424026: step 1411, loss 0.142758, acc 0.96875
2018-09-12T15:55:49.884052: step 1412, loss 0.224833, acc 0.890625
2018-09-12T15:55:50.287075: step 1413, loss 0.146102, acc 0.953125
2018-09-12T15:55:50.708099: step 1414, loss 0.210592, acc 0.921875
2018-09-12T15:55:51.115123: step 1415, loss 0.223348, acc 0.890625
2018-09-12T15:55:51.511145: step 1416, loss 0.1778, acc 0.953125
2018-09-12T15:55:51.946170: step 1417, loss 0.218, acc 0.921875
2018-09-12T15:55:52.362194: step 1418, loss 0.139679, acc 0.96875
2018-09-12T15:55:52.771217: step 1419, loss 0.235799, acc 0.921875
2018-09-12T15:55:53.159240: step 1420, loss 0.235289, acc 0.875
2018-09-12T15:55:53.602265: step 1421, loss 0.212196, acc 0.9375
2018-09-12T15:55:54.045290: step 1422, loss 0.260955, acc 0.90625
2018-09-12T15:55:54.511317: step 1423, loss 0.135838, acc 0.953125
2018-09-12T15:55:55.009346: step 1424, loss 0.202852, acc 0.90625
2018-09-12T15:55:55.451371: step 1425, loss 0.236662, acc 0.90625
2018-09-12T15:55:55.886396: step 1426, loss 0.186331, acc 0.90625
2018-09-12T15:55:56.362423: step 1427, loss 0.217907, acc 0.90625
2018-09-12T15:55:56.851451: step 1428, loss 0.157646, acc 0.890625
2018-09-12T15:55:57.353480: step 1429, loss 0.196109, acc 0.921875
2018-09-12T15:55:57.863509: step 1430, loss 0.191147, acc 0.921875
2018-09-12T15:55:58.299534: step 1431, loss 0.268325, acc 0.859375
2018-09-12T15:55:58.758560: step 1432, loss 0.219002, acc 0.90625
2018-09-12T15:55:59.173584: step 1433, loss 0.155685, acc 0.9375
2018-09-12T15:55:59.595608: step 1434, loss 0.184007, acc 0.953125
2018-09-12T15:55:59.981630: step 1435, loss 0.140162, acc 0.96875
2018-09-12T15:56:00.379653: step 1436, loss 0.204378, acc 0.90625
2018-09-12T15:56:00.775675: step 1437, loss 0.158343, acc 0.9375
2018-09-12T15:56:01.171698: step 1438, loss 0.193835, acc 0.9375
2018-09-12T15:56:01.558720: step 1439, loss 0.177585, acc 0.921875
2018-09-12T15:56:01.960743: step 1440, loss 0.20482, acc 0.90625
2018-09-12T15:56:02.341765: step 1441, loss 0.131471, acc 0.953125
2018-09-12T15:56:02.732787: step 1442, loss 0.121177, acc 0.9375
2018-09-12T15:56:03.129810: step 1443, loss 0.178464, acc 0.9375
2018-09-12T15:56:03.537833: step 1444, loss 0.222922, acc 0.90625
2018-09-12T15:56:03.947857: step 1445, loss 0.164609, acc 0.953125
2018-09-12T15:56:04.355880: step 1446, loss 0.0770714, acc 0.96875
2018-09-12T15:56:04.752903: step 1447, loss 0.164132, acc 0.953125
2018-09-12T15:56:05.150926: step 1448, loss 0.192279, acc 0.921875
2018-09-12T15:56:05.576950: step 1449, loss 0.192756, acc 0.953125
2018-09-12T15:56:05.999974: step 1450, loss 0.159329, acc 0.9375
2018-09-12T15:56:06.410998: step 1451, loss 0.268623, acc 0.875
2018-09-12T15:56:06.841022: step 1452, loss 0.185384, acc 0.9375
2018-09-12T15:56:07.276047: step 1453, loss 0.213431, acc 0.859375
2018-09-12T15:56:07.663069: step 1454, loss 0.200035, acc 0.90625
2018-09-12T15:56:08.069092: step 1455, loss 0.143859, acc 0.96875
2018-09-12T15:56:08.460115: step 1456, loss 0.144027, acc 0.953125
2018-09-12T15:56:08.870138: step 1457, loss 0.149378, acc 0.953125
2018-09-12T15:56:09.265161: step 1458, loss 0.359603, acc 0.84375
2018-09-12T15:56:09.665184: step 1459, loss 0.211573, acc 0.90625
2018-09-12T15:56:10.060206: step 1460, loss 0.352951, acc 0.8125
2018-09-12T15:56:10.510232: step 1461, loss 0.186293, acc 0.921875
2018-09-12T15:56:10.976259: step 1462, loss 0.171722, acc 0.9375
2018-09-12T15:56:11.420284: step 1463, loss 0.0850528, acc 0.96875
2018-09-12T15:56:11.860309: step 1464, loss 0.220161, acc 0.90625
2018-09-12T15:56:12.287334: step 1465, loss 0.287439, acc 0.890625
2018-09-12T15:56:12.729359: step 1466, loss 0.119802, acc 0.96875
2018-09-12T15:56:13.187385: step 1467, loss 0.193979, acc 0.921875
2018-09-12T15:56:13.596409: step 1468, loss 0.228991, acc 0.890625
2018-09-12T15:56:13.998432: step 1469, loss 0.234438, acc 0.890625
2018-09-12T15:56:14.385454: step 1470, loss 0.205922, acc 0.921875
2018-09-12T15:56:14.771476: step 1471, loss 0.33661, acc 0.875
2018-09-12T15:56:15.170499: step 1472, loss 0.227717, acc 0.921875
2018-09-12T15:56:15.560521: step 1473, loss 0.126243, acc 0.953125
2018-09-12T15:56:15.968544: step 1474, loss 0.172706, acc 0.921875
2018-09-12T15:56:16.359567: step 1475, loss 0.278461, acc 0.84375
2018-09-12T15:56:16.753589: step 1476, loss 0.180003, acc 0.953125
2018-09-12T15:56:17.137611: step 1477, loss 0.161978, acc 0.9375
2018-09-12T15:56:17.567636: step 1478, loss 0.208873, acc 0.921875
2018-09-12T15:56:17.973659: step 1479, loss 0.190804, acc 0.9375
2018-09-12T15:56:18.360681: step 1480, loss 0.234183, acc 0.90625
2018-09-12T15:56:18.755704: step 1481, loss 0.140997, acc 0.96875
2018-09-12T15:56:19.141726: step 1482, loss 0.182258, acc 0.921875
2018-09-12T15:56:19.551749: step 1483, loss 0.243517, acc 0.859375
2018-09-12T15:56:19.941772: step 1484, loss 0.258878, acc 0.921875
2018-09-12T15:56:20.336794: step 1485, loss 0.163885, acc 0.9375
2018-09-12T15:56:20.761818: step 1486, loss 0.153624, acc 0.953125
2018-09-12T15:56:21.165842: step 1487, loss 0.264372, acc 0.890625
2018-09-12T15:56:21.555864: step 1488, loss 0.220541, acc 0.90625
2018-09-12T15:56:21.952887: step 1489, loss 0.256063, acc 0.859375
2018-09-12T15:56:22.334908: step 1490, loss 0.184507, acc 0.890625
2018-09-12T15:56:22.736931: step 1491, loss 0.0973564, acc 0.96875
2018-09-12T15:56:23.113953: step 1492, loss 0.31193, acc 0.890625
2018-09-12T15:56:23.512976: step 1493, loss 0.203077, acc 0.90625
2018-09-12T15:56:23.900998: step 1494, loss 0.112984, acc 0.96875
2018-09-12T15:56:24.293020: step 1495, loss 0.121947, acc 0.96875
2018-09-12T15:56:24.677042: step 1496, loss 0.138129, acc 0.96875
2018-09-12T15:56:25.072065: step 1497, loss 0.218038, acc 0.921875
2018-09-12T15:56:25.465087: step 1498, loss 0.178282, acc 0.921875
2018-09-12T15:56:25.862110: step 1499, loss 0.274016, acc 0.90625
2018-09-12T15:56:26.241132: step 1500, loss 0.108865, acc 0.95

Evaluation:
2018-09-12T15:56:27.343195: step 1500, loss 0.638916, acc 0.713884

Saved model checkpoint to ./-1500

2018-09-12T15:56:28.617268: step 1501, loss 0.0918366, acc 0.96875
2018-09-12T15:56:29.004290: step 1502, loss 0.173333, acc 0.890625
2018-09-12T15:56:29.449315: step 1503, loss 0.0550469, acc 0.984375
2018-09-12T15:56:29.931343: step 1504, loss 0.159637, acc 0.953125
2018-09-12T15:56:30.329366: step 1505, loss 0.145387, acc 0.9375
2018-09-12T15:56:30.719388: step 1506, loss 0.254048, acc 0.875
2018-09-12T15:56:31.131412: step 1507, loss 0.112293, acc 0.9375
2018-09-12T15:56:31.535435: step 1508, loss 0.219521, acc 0.9375
2018-09-12T15:56:31.951458: step 1509, loss 0.206025, acc 0.90625
2018-09-12T15:56:32.347481: step 1510, loss 0.127382, acc 0.96875
2018-09-12T15:56:32.735503: step 1511, loss 0.157697, acc 0.921875
2018-09-12T15:56:33.153527: step 1512, loss 0.188429, acc 0.9375
2018-09-12T15:56:33.536549: step 1513, loss 0.132132, acc 0.953125
2018-09-12T15:56:33.935572: step 1514, loss 0.109845, acc 0.953125
2018-09-12T15:56:34.334595: step 1515, loss 0.119603, acc 0.921875
2018-09-12T15:56:34.740618: step 1516, loss 0.0755205, acc 1
2018-09-12T15:56:35.133640: step 1517, loss 0.10233, acc 0.96875
2018-09-12T15:56:35.528663: step 1518, loss 0.123738, acc 0.953125
2018-09-12T15:56:35.927686: step 1519, loss 0.135598, acc 0.9375
2018-09-12T15:56:36.316708: step 1520, loss 0.147403, acc 0.90625
2018-09-12T15:56:36.706730: step 1521, loss 0.127152, acc 0.921875
2018-09-12T15:56:37.094753: step 1522, loss 0.117076, acc 0.96875
2018-09-12T15:56:37.496776: step 1523, loss 0.161982, acc 0.953125
2018-09-12T15:56:37.894798: step 1524, loss 0.201109, acc 0.921875
2018-09-12T15:56:38.285821: step 1525, loss 0.131247, acc 0.921875
2018-09-12T15:56:38.680843: step 1526, loss 0.12976, acc 0.953125
2018-09-12T15:56:39.075866: step 1527, loss 0.0985793, acc 0.9375
2018-09-12T15:56:39.471889: step 1528, loss 0.177121, acc 0.890625
2018-09-12T15:56:39.872912: step 1529, loss 0.0913787, acc 0.984375
2018-09-12T15:56:40.261934: step 1530, loss 0.119913, acc 0.9375
2018-09-12T15:56:40.660957: step 1531, loss 0.198141, acc 0.9375
2018-09-12T15:56:41.047979: step 1532, loss 0.222882, acc 0.921875
2018-09-12T15:56:41.447002: step 1533, loss 0.155539, acc 0.9375
2018-09-12T15:56:41.845024: step 1534, loss 0.20495, acc 0.875
2018-09-12T15:56:42.255048: step 1535, loss 0.0926397, acc 0.96875
2018-09-12T15:56:42.685072: step 1536, loss 0.123109, acc 0.984375
2018-09-12T15:56:43.128098: step 1537, loss 0.151159, acc 0.9375
2018-09-12T15:56:43.580124: step 1538, loss 0.23052, acc 0.921875
2018-09-12T15:56:44.022149: step 1539, loss 0.145929, acc 0.953125
2018-09-12T15:56:44.469174: step 1540, loss 0.10282, acc 0.96875
2018-09-12T15:56:44.907200: step 1541, loss 0.077975, acc 0.96875
2018-09-12T15:56:45.341224: step 1542, loss 0.160777, acc 0.921875
2018-09-12T15:56:45.756248: step 1543, loss 0.187488, acc 0.9375
2018-09-12T15:56:46.145270: step 1544, loss 0.101614, acc 0.96875
2018-09-12T15:56:46.538293: step 1545, loss 0.195172, acc 0.90625
2018-09-12T15:56:46.931315: step 1546, loss 0.223831, acc 0.890625
2018-09-12T15:56:47.322338: step 1547, loss 0.18802, acc 0.921875
2018-09-12T15:56:47.715360: step 1548, loss 0.257226, acc 0.890625
2018-09-12T15:56:48.119383: step 1549, loss 0.205753, acc 0.90625
2018-09-12T15:56:48.518406: step 1550, loss 0.134129, acc 0.953125
2018-09-12T15:56:48.934430: step 1551, loss 0.129091, acc 0.953125
2018-09-12T15:56:49.329452: step 1552, loss 0.159822, acc 0.9375
2018-09-12T15:56:49.736476: step 1553, loss 0.13819, acc 0.9375
2018-09-12T15:56:50.117498: step 1554, loss 0.142601, acc 0.953125
2018-09-12T15:56:50.514520: step 1555, loss 0.221971, acc 0.9375
2018-09-12T15:56:50.915543: step 1556, loss 0.102965, acc 0.96875
2018-09-12T15:56:51.318566: step 1557, loss 0.104697, acc 0.96875
2018-09-12T15:56:51.709589: step 1558, loss 0.198468, acc 0.921875
2018-09-12T15:56:52.105611: step 1559, loss 0.186629, acc 0.921875
2018-09-12T15:56:52.514635: step 1560, loss 0.100764, acc 0.953125
2018-09-12T15:56:52.900657: step 1561, loss 0.127644, acc 0.9375
2018-09-12T15:56:53.296679: step 1562, loss 0.178105, acc 0.90625
2018-09-12T15:56:53.681701: step 1563, loss 0.232327, acc 0.890625
2018-09-12T15:56:54.079724: step 1564, loss 0.0764486, acc 0.96875
2018-09-12T15:56:54.467746: step 1565, loss 0.146882, acc 0.953125
2018-09-12T15:56:54.873770: step 1566, loss 0.0993966, acc 0.96875
2018-09-12T15:56:55.255791: step 1567, loss 0.112644, acc 0.984375
2018-09-12T15:56:55.656814: step 1568, loss 0.111321, acc 0.953125
2018-09-12T15:56:56.043836: step 1569, loss 0.215198, acc 0.96875
2018-09-12T15:56:56.440859: step 1570, loss 0.0983199, acc 0.984375
2018-09-12T15:56:56.825881: step 1571, loss 0.180124, acc 0.9375
2018-09-12T15:56:57.214903: step 1572, loss 0.150462, acc 0.921875
2018-09-12T15:56:57.611926: step 1573, loss 0.113314, acc 0.953125
2018-09-12T15:56:58.002949: step 1574, loss 0.254343, acc 0.890625
2018-09-12T15:56:58.422973: step 1575, loss 0.133599, acc 0.9375
2018-09-12T15:56:58.864998: step 1576, loss 0.240021, acc 0.90625
2018-09-12T15:56:59.310023: step 1577, loss 0.123862, acc 0.953125
2018-09-12T15:56:59.767049: step 1578, loss 0.159224, acc 0.921875
2018-09-12T15:57:00.276079: step 1579, loss 0.0912489, acc 0.96875
2018-09-12T15:57:00.778107: step 1580, loss 0.0926124, acc 0.984375
2018-09-12T15:57:01.399143: step 1581, loss 0.181708, acc 0.9375
2018-09-12T15:57:01.857169: step 1582, loss 0.0984086, acc 0.96875
2018-09-12T15:57:02.273193: step 1583, loss 0.161925, acc 0.953125
2018-09-12T15:57:02.708218: step 1584, loss 0.149363, acc 0.953125
2018-09-12T15:57:03.145243: step 1585, loss 0.135848, acc 0.9375
2018-09-12T15:57:03.557266: step 1586, loss 0.216114, acc 0.921875
2018-09-12T15:57:03.952289: step 1587, loss 0.112512, acc 0.96875
2018-09-12T15:57:04.347311: step 1588, loss 0.0914948, acc 0.96875
2018-09-12T15:57:04.737334: step 1589, loss 0.0858716, acc 0.984375
2018-09-12T15:57:05.126356: step 1590, loss 0.0979, acc 0.96875
2018-09-12T15:57:05.524379: step 1591, loss 0.140684, acc 0.921875
2018-09-12T15:57:05.924402: step 1592, loss 0.215568, acc 0.921875
2018-09-12T15:57:06.316424: step 1593, loss 0.224316, acc 0.90625
2018-09-12T15:57:06.725447: step 1594, loss 0.110295, acc 0.953125
2018-09-12T15:57:07.113470: step 1595, loss 0.113148, acc 0.953125
2018-09-12T15:57:07.504492: step 1596, loss 0.0903001, acc 0.96875
2018-09-12T15:57:07.898515: step 1597, loss 0.0922538, acc 0.96875
2018-09-12T15:57:08.283537: step 1598, loss 0.112337, acc 0.953125
2018-09-12T15:57:08.711561: step 1599, loss 0.0872437, acc 0.984375
2018-09-12T15:57:09.104584: step 1600, loss 0.125447, acc 0.9375

Evaluation:
2018-09-12T15:57:10.107641: step 1600, loss 0.652642, acc 0.726079

Saved model checkpoint to ./-1600

2018-09-12T15:57:11.451718: step 1601, loss 0.194961, acc 0.9375
2018-09-12T15:57:11.866742: step 1602, loss 0.0793139, acc 0.984375
2018-09-12T15:57:12.405772: step 1603, loss 0.165498, acc 0.9375
2018-09-12T15:57:12.897800: step 1604, loss 0.128892, acc 0.9375
2018-09-12T15:57:13.285823: step 1605, loss 0.0638237, acc 1
2018-09-12T15:57:13.698846: step 1606, loss 0.182495, acc 0.90625
2018-09-12T15:57:14.112870: step 1607, loss 0.169701, acc 0.9375
2018-09-12T15:57:14.560896: step 1608, loss 0.175784, acc 0.921875
2018-09-12T15:57:15.057924: step 1609, loss 0.156054, acc 0.953125
2018-09-12T15:57:15.592955: step 1610, loss 0.268097, acc 0.890625
2018-09-12T15:57:16.033980: step 1611, loss 0.181597, acc 0.90625
2018-09-12T15:57:16.471005: step 1612, loss 0.163613, acc 0.9375
2018-09-12T15:57:16.904030: step 1613, loss 0.14569, acc 0.9375
2018-09-12T15:57:17.346055: step 1614, loss 0.180421, acc 0.9375
2018-09-12T15:57:17.807081: step 1615, loss 0.0903308, acc 0.953125
2018-09-12T15:57:18.188103: step 1616, loss 0.100587, acc 0.9375
2018-09-12T15:57:18.590126: step 1617, loss 0.290655, acc 0.859375
2018-09-12T15:57:19.003150: step 1618, loss 0.124172, acc 0.96875
2018-09-12T15:57:19.412173: step 1619, loss 0.186737, acc 0.90625
2018-09-12T15:57:19.860199: step 1620, loss 0.236214, acc 0.890625
2018-09-12T15:57:20.280223: step 1621, loss 0.181157, acc 0.90625
2018-09-12T15:57:20.733249: step 1622, loss 0.101332, acc 0.96875
2018-09-12T15:57:21.200275: step 1623, loss 0.172586, acc 0.90625
2018-09-12T15:57:21.621299: step 1624, loss 0.130251, acc 0.9375
2018-09-12T15:57:22.054324: step 1625, loss 0.185335, acc 0.90625
2018-09-12T15:57:22.490349: step 1626, loss 0.200845, acc 0.921875
2018-09-12T15:57:22.884372: step 1627, loss 0.0818094, acc 0.953125
2018-09-12T15:57:23.279394: step 1628, loss 0.11694, acc 0.953125
2018-09-12T15:57:23.683417: step 1629, loss 0.145715, acc 0.921875
2018-09-12T15:57:24.072440: step 1630, loss 0.115836, acc 0.984375
2018-09-12T15:57:24.473463: step 1631, loss 0.279144, acc 0.875
2018-09-12T15:57:24.868485: step 1632, loss 0.1564, acc 0.9375
2018-09-12T15:57:25.276508: step 1633, loss 0.179494, acc 0.9375
2018-09-12T15:57:25.682532: step 1634, loss 0.138594, acc 0.953125
2018-09-12T15:57:26.102556: step 1635, loss 0.180683, acc 0.9375
2018-09-12T15:57:26.513579: step 1636, loss 0.185183, acc 0.890625
2018-09-12T15:57:26.926603: step 1637, loss 0.110082, acc 0.953125
2018-09-12T15:57:27.329626: step 1638, loss 0.1158, acc 0.953125
2018-09-12T15:57:27.716648: step 1639, loss 0.11135, acc 0.953125
2018-09-12T15:57:28.114671: step 1640, loss 0.138075, acc 0.921875
2018-09-12T15:57:28.501693: step 1641, loss 0.118052, acc 0.953125
2018-09-12T15:57:28.939718: step 1642, loss 0.146258, acc 0.953125
2018-09-12T15:57:29.394744: step 1643, loss 0.178555, acc 0.9375
2018-09-12T15:57:29.818768: step 1644, loss 0.0994109, acc 0.96875
2018-09-12T15:57:30.215791: step 1645, loss 0.235825, acc 0.90625
2018-09-12T15:57:30.684818: step 1646, loss 0.119997, acc 0.953125
2018-09-12T15:57:31.168846: step 1647, loss 0.179564, acc 0.90625
2018-09-12T15:57:31.667874: step 1648, loss 0.153405, acc 0.953125
2018-09-12T15:57:32.157902: step 1649, loss 0.163199, acc 0.9375
2018-09-12T15:57:32.667931: step 1650, loss 0.1055, acc 0.983333
2018-09-12T15:57:33.101956: step 1651, loss 0.16415, acc 0.921875
2018-09-12T15:57:33.573983: step 1652, loss 0.184561, acc 0.921875
2018-09-12T15:57:34.021009: step 1653, loss 0.14465, acc 0.96875
2018-09-12T15:57:34.455033: step 1654, loss 0.078579, acc 0.96875
2018-09-12T15:57:34.936061: step 1655, loss 0.0944775, acc 0.984375
2018-09-12T15:57:35.393087: step 1656, loss 0.131687, acc 0.921875
2018-09-12T15:57:35.794110: step 1657, loss 0.109535, acc 0.96875
2018-09-12T15:57:36.220134: step 1658, loss 0.119439, acc 0.953125
2018-09-12T15:57:36.601156: step 1659, loss 0.154976, acc 0.921875
2018-09-12T15:57:36.998179: step 1660, loss 0.0830105, acc 0.984375
2018-09-12T15:57:37.392201: step 1661, loss 0.0473422, acc 1
2018-09-12T15:57:37.832227: step 1662, loss 0.184636, acc 0.921875
2018-09-12T15:57:38.278252: step 1663, loss 0.14609, acc 0.9375
2018-09-12T15:57:38.671275: step 1664, loss 0.0683763, acc 0.984375
2018-09-12T15:57:39.076298: step 1665, loss 0.109102, acc 0.96875
2018-09-12T15:57:39.492322: step 1666, loss 0.0963881, acc 0.953125
2018-09-12T15:57:39.891344: step 1667, loss 0.129421, acc 0.9375
2018-09-12T15:57:40.284367: step 1668, loss 0.201473, acc 0.90625
2018-09-12T15:57:40.800396: step 1669, loss 0.0667278, acc 0.984375
2018-09-12T15:57:41.187419: step 1670, loss 0.150037, acc 0.9375
2018-09-12T15:57:41.575441: step 1671, loss 0.0563151, acc 0.984375
2018-09-12T15:57:41.956463: step 1672, loss 0.0938882, acc 0.96875
2018-09-12T15:57:42.347485: step 1673, loss 0.0957915, acc 0.953125
2018-09-12T15:57:42.729507: step 1674, loss 0.048519, acc 1
2018-09-12T15:57:43.115529: step 1675, loss 0.0570937, acc 0.984375
2018-09-12T15:57:43.500551: step 1676, loss 0.135536, acc 0.96875
2018-09-12T15:57:43.889573: step 1677, loss 0.102716, acc 0.953125
2018-09-12T15:57:44.263594: step 1678, loss 0.106674, acc 0.96875
2018-09-12T15:57:44.659617: step 1679, loss 0.0539181, acc 1
2018-09-12T15:57:45.035639: step 1680, loss 0.0784201, acc 0.984375
2018-09-12T15:57:45.429661: step 1681, loss 0.113984, acc 0.9375
2018-09-12T15:57:45.827684: step 1682, loss 0.168655, acc 0.953125
2018-09-12T15:57:46.216706: step 1683, loss 0.130373, acc 0.9375
2018-09-12T15:57:46.617729: step 1684, loss 0.104908, acc 0.96875
2018-09-12T15:57:46.999751: step 1685, loss 0.122701, acc 0.96875
2018-09-12T15:57:47.392773: step 1686, loss 0.0897949, acc 0.984375
2018-09-12T15:57:47.772795: step 1687, loss 0.13983, acc 0.953125
2018-09-12T15:57:48.171818: step 1688, loss 0.132142, acc 0.96875
2018-09-12T15:57:48.562840: step 1689, loss 0.0965643, acc 0.953125
2018-09-12T15:57:48.984865: step 1690, loss 0.0779362, acc 0.984375
2018-09-12T15:57:49.403889: step 1691, loss 0.132989, acc 0.9375
2018-09-12T15:57:49.874915: step 1692, loss 0.0764729, acc 0.96875
2018-09-12T15:57:50.280939: step 1693, loss 0.0714849, acc 0.984375
2018-09-12T15:57:50.720964: step 1694, loss 0.198847, acc 0.9375
2018-09-12T15:57:51.139988: step 1695, loss 0.0820695, acc 0.96875
2018-09-12T15:57:51.595014: step 1696, loss 0.121983, acc 0.953125
2018-09-12T15:57:52.008037: step 1697, loss 0.0810092, acc 0.96875
2018-09-12T15:57:52.396060: step 1698, loss 0.0620938, acc 0.984375
2018-09-12T15:57:52.774081: step 1699, loss 0.120163, acc 0.953125
2018-09-12T15:57:53.161103: step 1700, loss 0.224572, acc 0.921875

Evaluation:
2018-09-12T15:57:54.153160: step 1700, loss 0.692615, acc 0.718574

Saved model checkpoint to ./-1700

2018-09-12T15:57:55.257223: step 1701, loss 0.183722, acc 0.9375
2018-09-12T15:57:55.622244: step 1702, loss 0.123793, acc 0.953125
2018-09-12T15:57:56.006266: step 1703, loss 0.114296, acc 0.9375
2018-09-12T15:57:56.499294: step 1704, loss 0.0902714, acc 0.96875
2018-09-12T15:57:56.863315: step 1705, loss 0.153267, acc 0.90625
2018-09-12T15:57:57.243337: step 1706, loss 0.0767985, acc 0.96875
2018-09-12T15:57:57.629359: step 1707, loss 0.126677, acc 0.953125
2018-09-12T15:57:58.019381: step 1708, loss 0.0824941, acc 0.96875
2018-09-12T15:57:58.396403: step 1709, loss 0.0830489, acc 0.96875
2018-09-12T15:57:58.797426: step 1710, loss 0.10121, acc 0.984375
2018-09-12T15:57:59.179448: step 1711, loss 0.0953091, acc 0.984375
2018-09-12T15:57:59.579471: step 1712, loss 0.113463, acc 0.9375
2018-09-12T15:57:59.960492: step 1713, loss 0.153286, acc 0.921875
2018-09-12T15:58:00.363515: step 1714, loss 0.255159, acc 0.90625
2018-09-12T15:58:00.756538: step 1715, loss 0.0658722, acc 0.96875
2018-09-12T15:58:01.152560: step 1716, loss 0.101563, acc 0.953125
2018-09-12T15:58:01.623587: step 1717, loss 0.159747, acc 0.96875
2018-09-12T15:58:02.008609: step 1718, loss 0.0757148, acc 0.96875
2018-09-12T15:58:02.408632: step 1719, loss 0.0629775, acc 0.984375
2018-09-12T15:58:02.861658: step 1720, loss 0.116574, acc 0.953125
2018-09-12T15:58:03.283682: step 1721, loss 0.102356, acc 0.953125
2018-09-12T15:58:03.695706: step 1722, loss 0.127944, acc 0.953125
2018-09-12T15:58:04.117730: step 1723, loss 0.137489, acc 0.953125
2018-09-12T15:58:04.535754: step 1724, loss 0.119659, acc 0.9375
2018-09-12T15:58:04.973779: step 1725, loss 0.111118, acc 0.953125
2018-09-12T15:58:05.381802: step 1726, loss 0.175476, acc 0.9375
2018-09-12T15:58:05.779825: step 1727, loss 0.140342, acc 0.9375
2018-09-12T15:58:06.160847: step 1728, loss 0.0772523, acc 0.984375
2018-09-12T15:58:06.564870: step 1729, loss 0.16818, acc 0.9375
2018-09-12T15:58:06.947892: step 1730, loss 0.130235, acc 0.96875
2018-09-12T15:58:07.335914: step 1731, loss 0.140974, acc 0.921875
2018-09-12T15:58:07.720936: step 1732, loss 0.0443673, acc 0.984375
2018-09-12T15:58:08.101958: step 1733, loss 0.175804, acc 0.953125
2018-09-12T15:58:08.486980: step 1734, loss 0.152922, acc 0.953125
2018-09-12T15:58:08.874002: step 1735, loss 0.158183, acc 0.921875
2018-09-12T15:58:09.270025: step 1736, loss 0.147136, acc 0.96875
2018-09-12T15:58:09.656047: step 1737, loss 0.154722, acc 0.90625
2018-09-12T15:58:10.045069: step 1738, loss 0.155381, acc 0.9375
2018-09-12T15:58:10.444092: step 1739, loss 0.0756082, acc 0.96875
2018-09-12T15:58:10.848115: step 1740, loss 0.0600739, acc 0.984375
2018-09-12T15:58:11.243138: step 1741, loss 0.175782, acc 0.9375
2018-09-12T15:58:11.629160: step 1742, loss 0.146165, acc 0.953125
2018-09-12T15:58:12.088186: step 1743, loss 0.0626364, acc 0.984375
2018-09-12T15:58:12.466208: step 1744, loss 0.0693215, acc 0.96875
2018-09-12T15:58:12.874231: step 1745, loss 0.0883909, acc 0.984375
2018-09-12T15:58:13.258253: step 1746, loss 0.154651, acc 0.9375
2018-09-12T15:58:13.649275: step 1747, loss 0.105814, acc 0.984375
2018-09-12T15:58:14.033297: step 1748, loss 0.102885, acc 0.953125
2018-09-12T15:58:14.420319: step 1749, loss 0.169682, acc 0.953125
2018-09-12T15:58:14.807341: step 1750, loss 0.143264, acc 0.953125
2018-09-12T15:58:15.200364: step 1751, loss 0.103068, acc 0.953125
2018-09-12T15:58:15.578386: step 1752, loss 0.240799, acc 0.9375
2018-09-12T15:58:15.973408: step 1753, loss 0.0807882, acc 0.953125
2018-09-12T15:58:16.354430: step 1754, loss 0.0815566, acc 0.96875
2018-09-12T15:58:16.741452: step 1755, loss 0.0586034, acc 0.984375
2018-09-12T15:58:17.132474: step 1756, loss 0.120289, acc 0.953125
2018-09-12T15:58:17.518497: step 1757, loss 0.0631037, acc 0.984375
2018-09-12T15:58:17.911519: step 1758, loss 0.0568393, acc 0.96875
2018-09-12T15:58:18.296541: step 1759, loss 0.113339, acc 0.984375
2018-09-12T15:58:18.715565: step 1760, loss 0.0866439, acc 0.9375
2018-09-12T15:58:19.153590: step 1761, loss 0.0894506, acc 0.953125
2018-09-12T15:58:19.591615: step 1762, loss 0.167933, acc 0.921875
2018-09-12T15:58:20.019640: step 1763, loss 0.166192, acc 0.90625
2018-09-12T15:58:20.452664: step 1764, loss 0.147892, acc 0.921875
2018-09-12T15:58:20.894690: step 1765, loss 0.119837, acc 0.953125
2018-09-12T15:58:21.319714: step 1766, loss 0.0654189, acc 1
2018-09-12T15:58:21.722737: step 1767, loss 0.049986, acc 1
2018-09-12T15:58:22.193764: step 1768, loss 0.180198, acc 0.921875
2018-09-12T15:58:22.583786: step 1769, loss 0.116816, acc 0.953125
2018-09-12T15:58:22.970808: step 1770, loss 0.110986, acc 0.921875
2018-09-12T15:58:23.362831: step 1771, loss 0.156205, acc 0.9375
2018-09-12T15:58:23.748853: step 1772, loss 0.128855, acc 0.953125
2018-09-12T15:58:24.170877: step 1773, loss 0.0999411, acc 0.953125
2018-09-12T15:58:24.557899: step 1774, loss 0.199747, acc 0.921875
2018-09-12T15:58:24.965923: step 1775, loss 0.1341, acc 0.953125
2018-09-12T15:58:25.453950: step 1776, loss 0.112589, acc 0.953125
2018-09-12T15:58:25.835972: step 1777, loss 0.112474, acc 0.953125
2018-09-12T15:58:26.219994: step 1778, loss 0.0772189, acc 0.953125
2018-09-12T15:58:26.600016: step 1779, loss 0.103713, acc 0.96875
2018-09-12T15:58:26.998039: step 1780, loss 0.0977428, acc 0.96875
2018-09-12T15:58:27.379061: step 1781, loss 0.102151, acc 0.96875
2018-09-12T15:58:27.780083: step 1782, loss 0.123987, acc 0.9375
2018-09-12T15:58:28.163105: step 1783, loss 0.0913428, acc 0.984375
2018-09-12T15:58:28.558128: step 1784, loss 0.0843079, acc 0.953125
2018-09-12T15:58:28.945150: step 1785, loss 0.137353, acc 0.96875
2018-09-12T15:58:29.345173: step 1786, loss 0.148945, acc 0.9375
2018-09-12T15:58:29.738195: step 1787, loss 0.166942, acc 0.921875
2018-09-12T15:58:30.154219: step 1788, loss 0.120011, acc 0.953125
2018-09-12T15:58:30.543242: step 1789, loss 0.133921, acc 0.953125
2018-09-12T15:58:30.933264: step 1790, loss 0.0885371, acc 0.96875
2018-09-12T15:58:31.343287: step 1791, loss 0.0788627, acc 0.96875
2018-09-12T15:58:31.743310: step 1792, loss 0.131404, acc 0.96875
2018-09-12T15:58:32.300342: step 1793, loss 0.130806, acc 0.9375
2018-09-12T15:58:32.675363: step 1794, loss 0.113498, acc 0.953125
2018-09-12T15:58:33.064386: step 1795, loss 0.152984, acc 0.9375
2018-09-12T15:58:33.437407: step 1796, loss 0.0720437, acc 0.96875
2018-09-12T15:58:33.837430: step 1797, loss 0.183047, acc 0.9375
2018-09-12T15:58:34.216452: step 1798, loss 0.0989971, acc 0.9375
2018-09-12T15:58:34.636476: step 1799, loss 0.112819, acc 0.953125
2018-09-12T15:58:35.096502: step 1800, loss 0.0645651, acc 0.983333

Evaluation:
2018-09-12T15:58:36.401577: step 1800, loss 0.669385, acc 0.738274

Saved model checkpoint to ./-1800

2018-09-12T15:58:37.614646: step 1801, loss 0.0866994, acc 0.953125
2018-09-12T15:58:38.066672: step 1802, loss 0.0829303, acc 0.984375
2018-09-12T15:58:38.687707: step 1803, loss 0.119595, acc 0.921875
2018-09-12T15:58:39.217738: step 1804, loss 0.0581227, acc 1
2018-09-12T15:58:39.622761: step 1805, loss 0.0317904, acc 1
2018-09-12T15:58:40.031784: step 1806, loss 0.151686, acc 0.96875
2018-09-12T15:58:40.416806: step 1807, loss 0.0715018, acc 0.953125
2018-09-12T15:58:40.821829: step 1808, loss 0.115858, acc 0.9375
2018-09-12T15:58:41.242854: step 1809, loss 0.0947445, acc 0.96875
2018-09-12T15:58:41.675878: step 1810, loss 0.0852451, acc 1
2018-09-12T15:58:42.068901: step 1811, loss 0.117754, acc 0.921875
2018-09-12T15:58:42.478924: step 1812, loss 0.0656881, acc 0.984375
2018-09-12T15:58:42.882947: step 1813, loss 0.0291729, acc 1
2018-09-12T15:58:43.264969: step 1814, loss 0.082575, acc 0.953125
2018-09-12T15:58:43.665992: step 1815, loss 0.0707296, acc 0.984375
2018-09-12T15:58:44.055014: step 1816, loss 0.0311637, acc 1
2018-09-12T15:58:44.453037: step 1817, loss 0.0841259, acc 0.96875
2018-09-12T15:58:44.839059: step 1818, loss 0.0889192, acc 0.984375
2018-09-12T15:58:45.257083: step 1819, loss 0.0949694, acc 0.984375
2018-09-12T15:58:45.656106: step 1820, loss 0.143313, acc 0.9375
2018-09-12T15:58:46.075130: step 1821, loss 0.0580422, acc 0.984375
2018-09-12T15:58:46.475153: step 1822, loss 0.0472742, acc 0.96875
2018-09-12T15:58:46.893177: step 1823, loss 0.0944013, acc 0.984375
2018-09-12T15:58:47.287199: step 1824, loss 0.0567788, acc 0.984375
2018-09-12T15:58:47.697223: step 1825, loss 0.080419, acc 0.984375
2018-09-12T15:58:48.121247: step 1826, loss 0.0460927, acc 0.984375
2018-09-12T15:58:48.522270: step 1827, loss 0.114466, acc 0.953125
2018-09-12T15:58:48.930293: step 1828, loss 0.0375952, acc 1
2018-09-12T15:58:49.330316: step 1829, loss 0.0634031, acc 0.984375
2018-09-12T15:58:49.716338: step 1830, loss 0.0972484, acc 0.953125
2018-09-12T15:58:50.109361: step 1831, loss 0.0932635, acc 0.96875
2018-09-12T15:58:50.524384: step 1832, loss 0.0828707, acc 0.96875
2018-09-12T15:58:50.977410: step 1833, loss 0.0989156, acc 0.953125
2018-09-12T15:58:51.432436: step 1834, loss 0.103126, acc 0.953125
2018-09-12T15:58:51.924464: step 1835, loss 0.0671883, acc 0.984375
2018-09-12T15:58:52.372490: step 1836, loss 0.18999, acc 0.96875
2018-09-12T15:58:52.817516: step 1837, loss 0.101233, acc 0.984375
2018-09-12T15:58:53.231539: step 1838, loss 0.115449, acc 0.921875
2018-09-12T15:58:53.632562: step 1839, loss 0.0959113, acc 0.96875
2018-09-12T15:58:54.123590: step 1840, loss 0.0992159, acc 0.953125
2018-09-12T15:58:54.524613: step 1841, loss 0.168822, acc 0.9375
2018-09-12T15:58:54.907635: step 1842, loss 0.0534557, acc 1
2018-09-12T15:58:55.289657: step 1843, loss 0.125594, acc 0.9375
2018-09-12T15:58:55.676679: step 1844, loss 0.153958, acc 0.953125
2018-09-12T15:58:56.056701: step 1845, loss 0.0584267, acc 1
2018-09-12T15:58:56.461724: step 1846, loss 0.151718, acc 0.9375
2018-09-12T15:58:56.842746: step 1847, loss 0.0620392, acc 0.984375
2018-09-12T15:58:57.225768: step 1848, loss 0.0858748, acc 0.984375
2018-09-12T15:58:57.603789: step 1849, loss 0.0716494, acc 1
2018-09-12T15:58:57.993812: step 1850, loss 0.0645635, acc 0.96875
2018-09-12T15:58:58.364833: step 1851, loss 0.113674, acc 0.953125
2018-09-12T15:58:58.760855: step 1852, loss 0.087672, acc 0.953125
2018-09-12T15:58:59.136877: step 1853, loss 0.0733399, acc 0.96875
2018-09-12T15:58:59.562901: step 1854, loss 0.0862822, acc 0.984375
2018-09-12T15:58:59.945923: step 1855, loss 0.0867499, acc 0.96875
2018-09-12T15:59:00.324945: step 1856, loss 0.133702, acc 0.953125
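
The log is cut off at step 1856, but the trend is already clear from the evaluations run every 100 steps: training-batch accuracy climbs from roughly 0.90 to 0.95 and above, while dev accuracy stays around 0.71 to 0.74 and dev loss keeps creeping up (0.5848 at step 1400 vs. 0.6694 at step 1800), a classic sign of overfitting on the small MR polarity dataset. A common countermeasure is to keep only checkpoints that improve dev accuracy and stop once it stops improving. A minimal sketch follows, assuming the train_step/dev_step helpers and other objects already defined in the training script, with dev_step modified to return the dev accuracy:

# A best-checkpoint / early-stopping sketch. This is an illustration, not part
# of the original train.py: train_step, dev_step, sess, saver, global_step,
# checkpoint_prefix and FLAGS are the objects already defined in the training
# script, and dev_step is assumed to return the dev accuracy.
best_dev_acc = 0.0
evals_without_gain = 0
PATIENCE = 5  # give up after 5 evaluations with no dev improvement

batches = data_helpers.batch_iter(
    list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
for batch in batches:
    x_batch, y_batch = zip(*batch)
    train_step(x_batch, y_batch)
    current_step = tf.train.global_step(sess, global_step)
    if current_step % FLAGS.evaluate_every == 0:
        dev_acc = dev_step(x_dev, y_dev)
        if dev_acc > best_dev_acc:
            best_dev_acc = dev_acc
            evals_without_gain = 0
            # Only keep checkpoints that actually improve on the dev set
            path = saver.save(sess, checkpoint_prefix, global_step=current_step)
            print("Saved best model checkpoint to {}".format(path))
        else:
            evals_without_gain += 1
            if evals_without_gain >= PATIENCE:
                print("Early stopping at step {}, best dev acc {:g}".format(
                    current_step, best_dev_acc))
                break

Under a rule like this, the run logged above would have kept the step-1800 checkpoint (dev acc 0.7383), the best of the evaluations shown, as the final model.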
