For reference, the code listing for SparseAutoencoder is given here, with the kl_divergence and cost functions discussed earlier:
class SparseAutoencoder(object): def __init__(self, num_input, num_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(), scale=0.1): self.num_input = num_input self.num_hidden = num_hidden self.transfer = transfer_function self.scale = tf.placeholder(tf.float32) self.training_scale = scale network_weights = self._initialize_weights() self.weights = network_weights self.sparsity_level = np.repeat([0.05], self.num_hidden).astype(np.float32) self.sparse_reg = 0.0 # model self.x = tf.placeholder(tf.float32, [None, self.num_input]) self.hidden_layer ...