Let's run this sparse autoencoder on the same MNIST dataset that we used in the other examples and compare the results:
class SparseAutoEncoderExample: def main(self): mnist = input_data.read_data_sets('MNIST_data', one_hot = True) def get_random_block_from_data(data, batch_size): start_index = np.random.randint(0, len(data) - batch_size) return data[start_index:(start_index + batch_size)] X_train = mnist.train.images X_test = mnist.test.images n_samples = int(mnist.train.num_examples) training_epochs = 5 batch_size = 128 display_step = 1 autoencoder =SparseAutoencoder(num_input=784, num_hidden = 200, transfer_function = tf.nn.sigmoid, optimizer = tf.train.AdamOptimizer( learning_rate = 0.001), scale = 0.01) for epoch in ...