Skip to content
代码片段 群组 项目
提交 c49e8fc7 编辑于 作者: Toshihiro NAKAE's avatar Toshihiro NAKAE
浏览文件

Changed f-strings to str.format calls to support Python 3.5

上级 a9159cf2
无相关合并请求
......@@ -35,12 +35,12 @@ class CompressionNet:
for size in self.hidden_layer_sizes[:-1]:
n_layer += 1
z = tf.layers.dense(z, size, activation=self.activation,
name=f"layer_{n_layer}")
name="layer_{}".format(n_layer))
# activation function of last layer is linear
n_layer += 1
z = tf.layers.dense(z, self.hidden_layer_sizes[-1],
name=f"layer_{n_layer}")
name="layer_{}".format(n_layer))
return z
......@@ -50,12 +50,12 @@ class CompressionNet:
for size in self.hidden_layer_sizes[:-1][::-1]:
n_layer += 1
z = tf.layers.dense(z, size, activation=self.activation,
name=f"layer_{n_layer}")
name="layer_{}".format(n_layer))
# activation function of last layer is linear
n_layer += 1
x_dash = tf.layers.dense(z, self.input_size,
name=f"layer_{n_layer}")
name="layer_{}".format(n_layer))
return x_dash
......
......@@ -153,7 +153,7 @@ class DAGMM:
if (epoch + 1) % 100 == 0:
loss_val = self.sess.run(loss, feed_dict={input:x, drop:0})
print(f" epoch {epoch+1}/{self.epoch_size} : loss = {loss_val:.3f}")
print(" epoch {}/{} : loss = {:.3f}".format(epoch + 1, self.epoch_size, loss_val))
# Fix GMM parameter
fix = self.gmm.fix_op()
......
......@@ -46,10 +46,10 @@ class EstimationNet:
for size in self.hidden_layer_sizes[:-1]:
n_layer += 1
z = tf.layers.dense(z, size, activation=self.activation,
name=f"layer_{n_layer}")
name="layer_{}".format(n_layer))
if dropout_ratio is not None:
z = tf.layers.dropout(z, dropout_ratio,
name=f"drop_{n_layer}")
name="drop_{}".format(n_layer))
# Last layer uses linear function (=logits)
size = self.hidden_layer_sizes[-1]
......
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册