什么?AI黑白图像能够自动上色?(五)
out_channelsstride=stride) normalized=batchnorm(convolved) rectified=lrelu(normalized 0.2 ) layers.append(rectified) #layer_5:[batch 31 31 ndf* 8
=[batch 30 30 1
with tf.variable_scope( \"layer_%d\" %(len(layers)+ 1 )): convolved=discrim_conv(rectifiedout_channels= 1 stride= 1 ) output=tf.sigmoid(convolved) layers.append(output) return layers[ -1
with tf.variable_scope( \"generator\" ): out_channels= int (targets.get_shape()[ -1
) outputs=create_generator(inputsout_channels) #createtwocopiesofdiscriminatorone for realpairsandone for fakepairs #theysharethesameunderlyingvariables with tf.name_scope( \"real_discriminator\" ): with tf.variable_scope( \"discriminator\" ): # 2 x[batchheightwidthchannels
=[batch 30 30 1
predict_real=create_discriminator(inputstargets) with tf.name_scope( \"fake_discriminator\" ): with tf.variable_scope( \"discriminator\" reuse=True): # 2 x[batchheightwidthchannels
=[batch 30 30 1
predict_fake=create_discriminator(inputsoutputs) with tf.name_scope( \"discriminator_loss\" ): #minimizing-tf.logwill try to get inputsto 1 #predict_real= 1 #predict_fake= 0 discrim_loss=tf.reduce_mean(-(tf.log(predict_real+EPS)+tf.log( 1 -predict_fake+EPS))) with tf.name_scope( \"generator_loss\" ): #predict_fake= 1 #abs(targets-outputs)= 0 gen_loss_GAN=tf.reduce_mean(-tf.log(predict_fake+EPS)) gen_loss_L1=tf.reduce_mean(tf.abs(targets-outputs)) gen_loss=gen_loss_GAN*a.gan_weight+gen_loss_L1*a.l1_weight with tf.name_scope( \"discriminator_train\" ): discrim_tvars=[ var for var in tf.trainable_variables() if var .name.startswith( \"discriminator\" )
推荐阅读
- 魔兽世界|魔兽TBC:为什么玩家期待跨区组队?降低组队难度,无需频繁换区
- 李儒|三国杀:为什么叫李儒时代的骄傲,他到底有什么好骄傲的?厉害吗
- edg战队|EDG决赛生死局圣枪哥直呼最后两局!Viper质问监督怕什么
- 池子|“为什么优菈总是在让人始料未及的版本到来?我还没准备好呀!”
- 原神|原神:阿贝多有什么用?兼顾副C与辅助,三大作用机制详解
- 游戏本|双11告一段落,游戏狂欢月还在继续!什么装备可坐等老头环?
- FMVP|Scout的FMVP争议过大,根本原因在于观众不理解什么是FMVP!
- fpx战队|为什么同样是S赛夺冠,FPX的热度,完全没法跟IG与EDG相比?
- 手机游戏|DNF关服了会有什么补偿?玩家讨论出四种可能,最后一条最靠谱
- 拉克丝|棋高弈招:黑白法排位上分火了,一轮技能下去,敌方直接血条消失