set allow_pickle=True in np.load() #1021

Merged · 7 commits · merged on Jul 16, 2019
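Background (not spelled out in the diff itself, but it is the motivation for this change): starting with NumPy 1.16.3, `np.load()` defaults to `allow_pickle=False`, so `.npy`/`.npz` files that store pickled Python objects (for example a dict of model weights) raise a `ValueError` unless the flag is passed explicitly. A minimal sketch of the failure mode and the fix, using a hypothetical `weights.npy` file rather than any file shipped with TensorLayer:

```python
# Sketch only: why np.load() now needs allow_pickle=True for pickled files.
import numpy as np

weights = {"fc1/kernel": np.zeros((784, 256)), "fc1/bias": np.zeros(256)}
np.save("weights.npy", weights)  # a dict becomes a 0-d object array, stored via pickle

try:
    np.load("weights.npy")  # NumPy >= 1.16.3 refuses pickled data by default
except ValueError as err:
    print("load failed:", err)

restored = np.load("weights.npy", allow_pickle=True).item()  # explicit opt-in
print(sorted(restored.keys()))
```

On NumPy versions older than 1.16.3 both calls succeed, which is why the missing flag only began to break loading after a NumPy upgrade.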
4 changes: 3 additions & 1 deletion CHANGELOG.md
@@ -86,6 +86,8 @@ To release a new version, please update the changelog as followed:
### Fixed
- Fix `tf.models.Model._construct_graph` for list of outputs, e.g. STN case (PR #1010)
- Enable better `in_channels` exception raise. (PR #1015)
- Set allow_pickle=True in np.load() (PR #1021)

### Removed

### Security
@@ -94,7 +96,7 @@ To release a new version, please update the changelog as followed:

- @zsdonghao
- @ChrisWu1997: #1010 #1015
- @warshallrho: #1017
- @warshallrho: #1017 #1021

## [2.1.0]

4 changes: 2 additions & 2 deletions examples/tutorial_work_with_onnx.py
@@ -105,7 +105,7 @@
>>>model = onnx.load('mnist.onnx')
>>>tf_rep = prepare(model)
>>>#Image Path
>>>img = np.load("./assets/image.npz")
>>>img = np.load("./assets/image.npz", allow_pickle=True)
>>>output = tf_rep.run(img.reshape([1, 784]))
>>>print "The digit is classified as ", np.argmax(output)

@@ -317,7 +317,7 @@ def convert_onnx_to_model(onnx_input_path):
model = onnx.load(onnx_input_path)
tf_rep = prepare(model)
# Image Path
img = np.load("./assets/image.npz")
img = np.load("./assets/image.npz", allow_pickle=True)
output = tf_rep.run(img.reshape([1, 784]))
print("The digit is classified as ", np.argmax(output))

12 changes: 6 additions & 6 deletions tensorlayer/files/utils.py
@@ -689,7 +689,7 @@ def load_cropped_svhn(path='data', include_extra=True):
np.savez(np_file, X=X_train, y=y_train)
del_file(filepath)
else:
v = np.load(np_file)
v = np.load(np_file, allow_pickle=True)
X_train = v['X']
y_train = v['y']
logging.info(" n_train: {}".format(len(y_train)))
@@ -706,7 +706,7 @@ def load_cropped_svhn(path='data', include_extra=True):
np.savez(np_file, X=X_test, y=y_test)
del_file(filepath)
else:
v = np.load(np_file)
v = np.load(np_file, allow_pickle=True)
X_test = v['X']
y_test = v['y']
logging.info(" n_test: {}".format(len(y_test)))
@@ -726,7 +726,7 @@ def load_cropped_svhn(path='data', include_extra=True):
np.savez(np_file, X=X_extra, y=y_extra)
del_file(filepath)
else:
v = np.load(np_file)
v = np.load(np_file, allow_pickle=True)
X_extra = v['X']
y_extra = v['y']
# print(X_train.shape, X_extra.shape)
@@ -2096,7 +2096,7 @@ def load_and_assign_npz_dict(name='model.npz', network=None, skip=False):
logging.error("file {} doesn't exist.".format(name))
return False

weights = np.load(name)
weights = np.load(name, allow_pickle=True)
if len(weights.keys()) != len(set(weights.keys())):
raise Exception("Duplication in model npz_dict %s" % name)

@@ -2288,9 +2288,9 @@ def load_npy_to_any(path='', name='file.npy'):
"""
file_path = os.path.join(path, name)
try:
return np.load(file_path).item()
return np.load(file_path, allow_pickle=True).item()
except Exception:
return np.load(file_path)
return np.load(file_path, allow_pickle=True)
raise Exception("[!] Fail to load %s" % file_path)


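For reference, a standalone sketch of the pattern `load_npy_to_any` relies on (a simplified illustration, not the library function itself): `.item()` unwraps a 0-d object array back into the original Python object, and raises `ValueError` for ordinary multi-element arrays, which is what the fallback branch handles.

```python
# Simplified sketch of the load_npy_to_any idea; names here are illustrative.
import os
import numpy as np


def load_npy_flexible(path="", name="file.npy"):
    file_path = os.path.join(path, name)
    try:
        # A 0-d object array (e.g. a saved dict) unwraps back to the original object.
        return np.load(file_path, allow_pickle=True).item()
    except ValueError:
        # .item() fails on ordinary multi-element arrays, so return them as-is.
        return np.load(file_path, allow_pickle=True)


np.save("as_dict.npy", {"a": 1, "b": 2})
np.save("as_array.npy", np.arange(4))
print(type(load_npy_flexible(name="as_dict.npy")))   # <class 'dict'>
print(type(load_npy_flexible(name="as_array.npy")))  # <class 'numpy.ndarray'>
```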
22 changes: 12 additions & 10 deletions tensorlayer/models/vgg.py
@@ -57,14 +57,16 @@
cfg = {
'A': [[64], 'M', [128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'],
'B': [[64, 64], 'M', [128, 128], 'M', [256, 256], 'M', [512, 512], 'M', [512, 512], 'M', 'F', 'fc1', 'fc2', 'O'],
'D': [
[64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M', [512, 512, 512], 'M', [512, 512, 512], 'M', 'F', 'fc1',
'fc2', 'O'
],
'E': [
[64, 64], 'M', [128, 128], 'M', [256, 256, 256, 256], 'M', [512, 512, 512, 512], 'M', [512, 512, 512, 512], 'M',
'F', 'fc1', 'fc2', 'O'
],
'D':
[
[64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M', [512, 512, 512], 'M', [512, 512, 512], 'M', 'F',
'fc1', 'fc2', 'O'
],
'E':
[
[64, 64], 'M', [128, 128], 'M', [256, 256, 256, 256], 'M', [512, 512, 512, 512], 'M', [512, 512, 512, 512],
'M', 'F', 'fc1', 'fc2', 'O'
],
}

mapped_cfg = {
@@ -158,15 +160,15 @@ def restore_model(model, layer_type):
maybe_download_and_extract(model_saved_name[layer_type], 'models', model_urls[layer_type])
weights = []
if layer_type == 'vgg16':
npz = np.load(os.path.join('models', model_saved_name[layer_type]))
npz = np.load(os.path.join('models', model_saved_name[layer_type]), allow_pickle=True)
# get weight list
for val in sorted(npz.items()):
logging.info(" Loading weights %s in %s" % (str(val[1].shape), val[0]))
weights.append(val[1])
if len(model.all_weights) == len(weights):
break
elif layer_type == 'vgg19':
npz = np.load(os.path.join('models', model_saved_name[layer_type]), encoding='latin1').item()
npz = np.load(os.path.join('models', model_saved_name[layer_type]), allow_pickle=True, encoding='latin1').item()
# get weight list
for val in sorted(npz.items()):
logging.info(" Loading %s in %s" % (str(val[1][0].shape), val[0]))
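One detail worth noting in `restore_model` above: the `encoding='latin1'` argument on the vgg19 branch suggests the pretrained weight file was pickled under Python 2 (Python 3 needs that encoding to decode the old byte strings), and on NumPy 1.16.3+ `allow_pickle=True` is needed on top of it. A hedged sketch of that combination, where the file name and the per-layer layout are assumptions for illustration rather than guaranteed properties of the actual download:

```python
# Hedged sketch: loading a Python-2-era pickled weight file with a recent NumPy.
# "vgg19.npy" and the [kernel, bias] layout are illustrative assumptions.
import numpy as np

params = np.load(
    "vgg19.npy",
    allow_pickle=True,    # required since NumPy 1.16.3
    encoding="latin1",    # decode Python 2 byte strings inside the pickle
).item()

for name, value in sorted(params.items()):
    # value is assumed to be a [kernel, bias] pair, matching val[1][0].shape above
    print(name, value[0].shape)
```

The two arguments are independent: `encoding` only controls how Python 2 strings inside the pickle are decoded, while `allow_pickle` is the security opt-in that this PR adds throughout the codebase.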