This repository was archived by the owner on Oct 21, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 12
Expand file tree
/
Copy paththreadedbatchdemo.py
More file actions
executable file
·117 lines (94 loc) · 3.21 KB
/
threadedbatchdemo.py
File metadata and controls
executable file
·117 lines (94 loc) · 3.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
#!/usr/bin/python3
"""Threaded batch inference demo for the Micron DLA SDK.

The main thread loads images and submits fixed-size batches to the
accelerator while a worker thread (GetResult) drains and prints the
top-5 classification results concurrently.
"""
import sys
sys.path.insert(0, '../../')  # locate the microndla package relative to this demo
import microndla
import threading
import os
import PIL
from PIL import Image
import numpy as np
from argparse import ArgumentParser

# Argument checking
parser = ArgumentParser(description="Micron DLA Example")
_ = parser.add_argument  # shorthand to keep the option list compact
_('modelpath', type=str, default='', help='Path to the model file')
_('imagesdir', type=str, default='', help='A directory name with input files')
_('-r', '--res', type=int, default=[3, 224, 224], nargs='+', help='expected image size (planes, height, width)')
_('-c', '--categories', type=str, default='', help='Categories file')
_('-f', '--nfpgas', type=int, default=1, help='Number of FPGAs to use')
_('-C', '--nclusters', type=int, default=2, help='Number of clusters to use')
_('-b', '--batch', type=int, default=1, help='Number images per cluster')
def GetResult():
    """Worker-thread loop: drain inference results and print the top-5.

    Runs until ie.GetResult() returns a None info object, which the main
    thread signals by queuing ie.PutInput(None, None). Reads the
    module-level ``args`` (for the categories file) and ``ie`` objects.
    """
    categories = None
    if args.categories != '':
        with open(args.categories) as f:
            categories = f.read().splitlines()
    # Fetch results until the end-of-stream sentinel arrives
    while True:
        result, info = ie.GetResult()
        if info is None:  # sentinel queued by PutInput(None, None)
            break
        # info maps batch slot -> filename; only len(info) slots are valid
        for batchidx in range(len(info)):
            tresult = result[batchidx]
            # Sort scores descending and print the five best entries
            idxs = (-tresult).argsort()
            print('')
            print('-------------- ' + str(info[batchidx]) + ' --------------')
            for i in range(5):
                if categories is not None:
                    print(categories[idxs[i]], tresult[idxs[i]])
                else:
                    print(idxs[i], tresult[idxs[i]])
def LoadImage(imagepath):
    """Load an image file into a normalized CHW float32 numpy array.

    Resizes to the module-level (xres, yres) resolution, scales pixels
    to [0, 1], transposes HWC -> CHW as required by the API, and applies
    the standard ImageNet per-channel mean/std normalization.
    """
    # Open inside a context manager so the file handle is released promptly
    with Image.open(imagepath) as pil_img:
        # Resize to the size expected by the network
        pil_img = pil_img.resize((xres, yres), resample=PIL.Image.BILINEAR)
        # Convert to numpy float in [0, 1]
        img = np.array(pil_img).astype(np.float32) / 255
    # Transpose to plane-major (CHW), as required by our API
    img = np.ascontiguousarray(img.transpose(2, 0, 1))
    # ImageNet normalization, broadcast over the three channel planes
    stat_mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1)
    stat_std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
    return (img - stat_mean) / stat_std
args = parser.parse_args()
xres = args.res[2]  # width
yres = args.res[1]  # height

# Create and initialize the accelerator object
ie = microndla.MDLA()
ie.SetFlag('imgs_per_cluster', str(args.batch))
#ie.SetFlag('debug','bw')
ie.SetFlag('nfpgas', str(args.nfpgas))
ie.SetFlag('nclusters', str(args.nclusters))
# Compile the model for the configured hardware layout
ie.Compile(args.modelpath)

# Total images submitted per PutInput call
batchsize = args.nfpgas * args.nclusters * args.batch

# Result collection runs concurrently with input submission
thread = threading.Thread(target=GetResult)
thread.start()

batchidx = 0
# Preallocated batch buffer; named `inputs` to avoid shadowing the builtin `input`
inputs = np.ndarray([batchsize, args.res[0], args.res[1], args.res[2]], dtype=np.float32)
info = {}
for fn in os.listdir(args.imagesdir):
    try:
        img = LoadImage(os.path.join(args.imagesdir, fn))
    except Exception as e:
        # Skip unreadable or non-image files, but say why instead of
        # silently swallowing every error as the old bare `except:` did
        print('Skipping ' + fn + ': ' + str(e))
        continue
    inputs[batchidx] = img
    info[batchidx] = fn
    batchidx += 1
    if batchidx == batchsize:
        ie.PutInput(inputs, info)
        batchidx = 0
        info = {}
# Flush the final, possibly partial batch (only len(info) slots are valid)
if batchidx > 0:
    ie.PutInput(inputs, info)
# End-of-stream sentinel: tells GetResult to stop
ie.PutInput(None, None)
thread.join()
# Free accelerator resources
ie.Free()
print('done')