
Load multiple graphs

import tensorflow as tf

class ImportGraph():
    """ Importing and running an isolated TF graph """
    def __init__(self, loc):
        # Create a local graph and use it in its own session
        self.graph = tf.Graph()
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=self.graph, config=config)
        with self.graph.as_default():
            # Import the saved model from location 'loc' into the local graph
            saver = tf.train.import_meta_graph(loc + '.meta',
                                               clear_devices=True)
            saver.restore(self.sess, loc)
            # There are TWO options for getting the output operation:
            # BY NAME:
            self.logits = self.graph.get_operation_by_name('proj/Reshape_1').outputs[0]
            # self.activation = self.graph.get_operation_by_name('activation_opt').outputs[0]
            # FROM A SAVED COLLECTION:
            # self.activation = tf.get_collection('activation')[0]

    def run(self, fd):
        """ Run the activation operation previously imported """
        return self.sess.run([self.logits], feed_dict=fd)
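
Because each ImportGraph owns its own tf.Graph and tf.Session, several checkpoints can be loaded and queried side by side. A sketch (the checkpoint prefixes and the 'input:0' feed key are hypothetical):

model_a = ImportGraph('ckpt/model_a')
model_b = ImportGraph('ckpt/model_b')
out_a = model_a.run({'input:0': batch})  # `batch` assumed to be prepared elsewhere
out_b = model_b.run({'input:0': batch})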
import random

import tensorflow as tf

class Model:
    def __init__(self, param):
        self.param = param

        # create & build graph
        self.graph = tf.Graph()
        self.build_graph()

        # create session, pinned to one randomly chosen GPU
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        # cuda_gpu_count() is a user-supplied helper returning the visible GPU ids
        gpu_num = random.choice(cuda_gpu_count())
        config.gpu_options.visible_device_list = str(gpu_num)
        self.sess = tf.Session(config=config, graph=self.graph)

    def build_graph(self):
        with self.graph.as_default():
            ...

    def __del__(self):
        # explicitly release resources by closing and deleting the session and graph
        self.sess.close()
        del self.sess
        del self.graph
        del self.param

    # train the model and return the test accuracy
    def train_test(self, train_data, train_label, test_data, test_label):
        ...
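
A hypothetical driver loop: training several models in sequence, where del model triggers __del__ so the session and graph are released before the next model claims a GPU (train_data etc. are assumed to be defined elsewhere):

for param in [0.1, 0.01, 0.001]:     # illustrative hyper-parameter values
    model = Model(param)
    acc = model.train_test(train_data, train_label, test_data, test_label)
    print(param, acc)
    del model                        # close the session, free the graph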

https://blog.csdn.net/silent56_th/article/details/81415940

Under squared error, the gradient is y * (1 - y) * (-2 * (t - y)); if y = 0, the gradient vanishes.

Under softmax cross-entropy, the gradient is y * (1 - y) / y; if y = 0, the gradient is still sizable.
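
A minimal sketch of this with torch autograd (the logit value -8.0 is just an illustrative assumption that puts y = sigmoid(z) near 0, with target t = 1):

import torch

z = torch.tensor([-8.0], requires_grad=True)  # logit, so y = sigmoid(z) ~ 0
t = torch.tensor([1.0])                       # target

y = torch.sigmoid(z)
((t - y) ** 2).sum().backward()
print(z.grad)   # ~ -6.7e-4: -2 * (t - y) * y * (1 - y), killed by the y factor

z.grad = None
y = torch.sigmoid(z)
(-(t * torch.log(y))).sum().backward()
print(z.grad)   # ~ -1.0: -t * (1 - y), since the 1/y cancels the y in y * (1 - y)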

# accumulate the per-batch outputs `temp` into a single tensor
if result is None:
    result = temp
else:
    result = torch.cat([result, temp])
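
Since torch.cat here re-allocates and copies `result` on every iteration, a common alternative is to collect the pieces in a list and concatenate once (a sketch; `batches` and `model` are hypothetical stand-ins):

chunks = []
for batch in batches:            # hypothetical data source
    chunks.append(model(batch))  # hypothetical network
result = torch.cat(chunks)       # single concatenation at the end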

# sanity check: did optimizer.step() actually change the weights?
a = list(self.parameters())[0].clone()
loss.backward()
self.optimizer.step()
b = list(self.parameters())[0].clone()
torch.equal(a.data, b.data)  # False if the first parameter was updated
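
The same check extended to every parameter (a sketch assuming the same self.parameters() / self.optimizer context as the snippet above):

before = [p.clone() for p in self.parameters()]
loss.backward()
self.optimizer.step()
# at least one parameter should differ after a successful update step
assert any(not torch.equal(b, p) for b, p in zip(before, self.parameters()))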

# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function

import torch.nn as nn
import torch


class Linear(nn.Module):
    """Demo: Dropout(p=1) behaves differently in train() vs eval() mode."""
    def __init__(self, p_dropout=1):
        super(Linear, self).__init__()
        self.dropout = nn.Dropout(p_dropout)

    def forward(self, x):
        y = self.dropout(x)
        return y


net = Linear()
a = torch.ones(4)

net.train()
print(net(a))

net.eval()
b = torch.ones(4)
print(net(b))
Variable containing:
0
0
0
0
[torch.FloatTensor of size 4]

Variable containing:
1
1
1
1
[torch.FloatTensor of size 4]
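
This is the expected behavior: with p_dropout=1 every element is zeroed out in train() mode (hence the zeros), while eval() turns dropout into an identity map (hence the ones).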

class NetALL(nn.Module):
    def __init__(self):
        super(NetALL, self).__init__()
        self.net1 = xx  # some sub-network
        self.net2 = xx
torch.save(self.net1.state_dict(), path)  # saved keys look like 'w1'
torch.save(self.state_dict(), path)       # saved keys look like 'net1.w1'
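
A minimal sketch of that naming difference, with nn.Linear modules standing in for the real sub-networks:

import torch.nn as nn

class NetALL(nn.Module):
    def __init__(self):
        super(NetALL, self).__init__()
        self.net1 = nn.Linear(2, 2)
        self.net2 = nn.Linear(2, 2)

net = NetALL()
print(net.net1.state_dict().keys())  # ['weight', 'bias']
print(net.state_dict().keys())       # ['net1.weight', 'net1.bias', 'net2.weight', 'net2.bias']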

ga = git add
gcmsg = git commit -m
ggpush = git push origin master