Loading two models from Saver into the same Tensorflow session

I have two networks: a Model, which generates output, and an Adversary, which evaluates the result.

Both were trained separately, but now I need to combine their results in one session.

I tried to implement the solution suggested in this post: Run some pre-prepared Tensorflow networks at the same time

My code

with tf.name_scope("model"):
    model = Model(args)
with tf.name_scope("adv"):
    adversary = Adversary(adv_args)

#...

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    # Get the variables specific to the `Model`
    # Also strip out the superfluous ":0", which for some reason is not saved in the checkpoint
    model_varlist = {v.name.lstrip("model/")[:-2]: v 
                     for v in tf.global_variables() if v.name[:5] == "model"}
    model_saver = tf.train.Saver(var_list=model_varlist)
    model_ckpt = tf.train.get_checkpoint_state(args.save_dir)
    model_saver.restore(sess, model_ckpt.model_checkpoint_path)

    # Get the variables specific to the `Adversary`
    adv_varlist = {v.name.lstrip("avd/")[:-2]: v 
                   for v in tf.global_variables() if v.name[:3] == "adv"}
    adv_saver = tf.train.Saver(var_list=adv_varlist)
    adv_ckpt = tf.train.get_checkpoint_state(adv_args.save_dir)
    adv_saver.restore(sess, adv_ckpt.model_checkpoint_path)

Problem

The call to model_saver.restore() does not seem to do anything. In another module I use a saver created with tf.train.Saver(tf.global_variables()) and it restores the checkpoint just fine.

The model exposes model.tvars = tf.trainable_variables(). To see what was happening, I used sess.run() to fetch the tvars after the restore. The values are still the freshly initialized ones, not the values from the checkpoint.
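A minimal sketch of the kind of check I mean; the variable picked is arbitrary, and sess, model_saver and model_ckpt are the objects created in the code above:

import numpy as np

# Pick one of the Model's trainable variables and compare its value
# before and after the restore.
some_var = [v for v in tf.trainable_variables() if v.name.startswith("model")][0]

before = sess.run(some_var)   # value set by the initializer
model_saver.restore(sess, model_ckpt.model_checkpoint_path)
after = sess.run(some_var)    # value after the restore

# If the restore had any effect, the two values should differ.
print("restore changed the variable:", not np.allclose(before, after))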

Why does model_saver.restore() not restore anything?


Figured it out, so I'm posting the solution in case it helps someone else.

The problem was not with the savers or the variable names, but with trying to keep both networks in a single graph and a single session. For background, see the TensorFlow documentation on graphs and sessions.

The fix is to build each network in its own graph and run it in its own session. Each Saver then simply saves and restores all variables of its own graph, and no renaming is needed:

# Build each network in its own graph
model_graph = tf.Graph()
with model_graph.as_default():
    model = Model(args)

adv_graph = tf.Graph()
with adv_graph.as_default():
    adversary = Adversary(adv_args)

# One session per graph
adv_sess = tf.Session(graph=adv_graph)
sess = tf.Session(graph=model_graph)

# Restore the Model's checkpoint into its own graph and session
with sess.as_default():
    with model_graph.as_default():
        tf.global_variables_initializer().run()
        model_saver = tf.train.Saver(tf.global_variables())
        model_ckpt = tf.train.get_checkpoint_state(args.save_dir)
        model_saver.restore(sess, model_ckpt.model_checkpoint_path)

# Restore the Adversary's checkpoint into its own graph and session
with adv_sess.as_default():
    with adv_graph.as_default():
        tf.global_variables_initializer().run()
        adv_saver = tf.train.Saver(tf.global_variables())
        adv_ckpt = tf.train.get_checkpoint_state(adv_args.save_dir)
        adv_saver.restore(adv_sess, adv_ckpt.model_checkpoint_path)

Note that any TensorFlow calls that touch a given network have to be made with the right session active, e.g. inside the corresponding with sess.as_default(): block (or by passing the session explicitly).
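Because the two networks now live in separate graphs, any data passed between them has to go through Python (e.g. as NumPy arrays). A minimal sketch of chaining them; model.input, model.output, adversary.input, adversary.score and batch are illustrative names, not part of the code above:

# Run the Model in its session, then feed its output to the Adversary
# in the other session. `batch` is whatever input the Model expects.
with sess.as_default():
    generated = sess.run(model.output, feed_dict={model.input: batch})

with adv_sess.as_default():
    score = adv_sess.run(adversary.score,
                         feed_dict={adversary.input: generated})

Finally, close both sessions when you are done: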

sess.close()
adv_sess.close()

Also, note this line:

adv_varlist = {v.name.lstrip("avd/")[:-2]: v 

the prefix is "avd" where it should be "adv".


Source: https://habr.com/ru/post/1652662/

