# NOTE(review): removed stray line-number residue left over from a copy/paste extraction.
# Evaluate the SPP-AlexNet checkpoint on every test image and write one
# line per image (path, predicted class, true label) to result_spp.txt,
# then print overall test accuracy.
#
# NOTE(review): the graph and session are rebuilt for EVERY image because
# the SPP layer lets each image keep its own spatial size, so the fc6
# input dimension is fixed only after pooling.  This is preserved here,
# but it is very slow — consider a variable-shape placeholder to avoid
# the per-image rebuild.  TODO confirm intent with the author.
it = 0
result = []
with open('result_spp.txt', 'w') as f:
    while it < len(tstid):
        if it % 10 == 0:
            print(it)  # lightweight progress indicator

        graph = tf.Graph()
        with graph.as_default():
            # PIL is used only to discover the source image dimensions so
            # the decoded tensor can be resized to half size.
            img = Image.open(data_dir[tstid[it]])
            filename_queue = tf.train.string_input_producer([data_dir[tstid[it]]])
            reader = tf.WholeFileReader()
            key, value = reader.read(filename_queue)
            my_img = tf.image.decode_jpeg(value, channels=3)
            # BUG FIX: '/' is float division in Python 3, but
            # resize_images needs integer pixel sizes; '//' is correct in
            # both Python 2 and 3.  method=1 is nearest-neighbour in TF1.
            my_img = tf.image.resize_images(
                my_img,
                [img.size[1] // 2, img.size[0] // 2],
                method=1, align_corners=False)
            my_img = tf.expand_dims(my_img, 0)  # add batch dimension

            # Placeholder matches this image's exact (1, H, W, 3) shape.
            x = tf.placeholder('float', shape=my_img.get_shape())
            print(my_img.get_shape())

            # Convolutional weights come from the pretrained AlexNet dump
            # (net_data); fc6 and fc8 are created fresh because SPP
            # changes fc6's input size and fc8 maps to num_classes.
            conv1W = tf.Variable(net_data["conv1"][0])
            conv1b = tf.Variable(net_data["conv1"][1])
            conv2W = tf.Variable(net_data["conv2"][0])
            conv2b = tf.Variable(net_data["conv2"][1])
            conv3W = tf.Variable(net_data["conv3"][0])
            conv3b = tf.Variable(net_data["conv3"][1])
            conv4W = tf.Variable(net_data["conv4"][0])
            conv4b = tf.Variable(net_data["conv4"][1])
            conv5W = tf.Variable(net_data["conv5"][0])
            conv5b = tf.Variable(net_data["conv5"][1])
            fc6W = weight_variable([hidden_dim * 256, 4096], 'fc6W')
            fc6b = tf.Variable(net_data["fc6"][1])
            fc7W = tf.Variable(net_data["fc7"][0])
            fc7b = tf.Variable(net_data["fc7"][1])
            fc8W = weight_variable([4096, num_classes], 'W_fc8')
            fc8b = bias_variable([num_classes], 'b_fc8')
            keep_prob = tf.placeholder('float')  # dropout keep rate

            def model(x):
                """AlexNet forward pass with spatial pyramid pooling
                replacing pool5; returns softmax class probabilities."""
                conv1 = tf.nn.relu(conv(x, conv1W, conv1b, 11, 11, 96, 4, 4, padding="SAME", group=1))
                lrn1 = tf.nn.local_response_normalization(conv1, depth_radius=5, alpha=0.0001, beta=0.75, bias=1.0)
                maxpool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
                conv2 = tf.nn.relu(conv(maxpool1, conv2W, conv2b, 5, 5, 256, 1, 1, padding="SAME", group=2))
                lrn2 = tf.nn.local_response_normalization(conv2, depth_radius=5, alpha=0.0001, beta=0.75, bias=1.0)
                maxpool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
                conv3 = tf.nn.relu(conv(maxpool2, conv3W, conv3b, 3, 3, 384, 1, 1, padding="SAME", group=1))
                conv4 = tf.nn.relu(conv(conv3, conv4W, conv4b, 3, 3, 384, 1, 1, padding="SAME", group=2))
                conv5 = tf.nn.relu(conv(conv4, conv5W, conv5b, 3, 3, 256, 1, 1, padding="SAME", group=2))
                # SPP pools conv5 into a fixed-length vector regardless of
                # the input image size.
                maxpool5 = spatial_pyramid_pool(
                    conv5,
                    int(conv5.get_shape()[0]),
                    [int(conv5.get_shape()[1]), int(conv5.get_shape()[2])],
                    out_pool_size)
                fc6 = tf.nn.relu_layer(
                    tf.reshape(maxpool5, [-1, int(prod(maxpool5.get_shape()[1:]))]),
                    fc6W, fc6b)
                fc6_drop = tf.nn.dropout(fc6, keep_prob)
                fc7 = tf.nn.relu_layer(fc6_drop, fc7W, fc7b)
                fc7_drop = tf.nn.dropout(fc7, keep_prob)
                fc8 = tf.nn.xw_plus_b(fc7_drop, fc8W, fc8b)
                return tf.nn.softmax(fc8)

            logits = model(x)
            predict = tf.argmax(logits, 1)
            # Restore by op name so checkpoint variables map onto this
            # freshly built graph.
            saver = tf.train.Saver({v.op.name: v for v in [
                conv1W, conv1b, conv2W, conv2b, conv3W, conv3b,
                conv4W, conv4b, conv5W, conv5b,
                fc6W, fc6b, fc7W, fc7b, fc8W, fc8b]})

        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())
            # Queue runners feed the single-file input pipeline.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            saver.restore(sess, './alex_model_spp.ckpt')
            image = sess.run(my_img)
            # BUG FIX: the original rebound the name 'predict' to the
            # eval result, clobbering the tensor; use a separate name.
            pred = predict.eval(feed_dict={x: image, keep_prob: 1.0})
            result.append(pred[0])
            f.write(data_dir[tstid[it]] + '\t' + str(pred[0]) + '\t'
                    + str(labels[tstid[it]]) + '\n')
            coord.request_stop()
            coord.join(threads)
            # NOTE(review): redundant sess.close()/del removed — the
            # 'with' block closes the session.

        it += 1

print('Test accuracy: %f'
      % (sum(np.array(result) == np.array(labels[tstid])).astype('float') / len(tstid)))
# NOTE(review): removed stray '|' residue from extraction.