Skip to content
Snippets Groups Projects
Commit 55c034d6 authored by Bhatu's avatar Bhatu
Browse files

Utility scripts for debugging and comparing outputs.

parent 7c325fc5
No related branches found
No related tags found
No related merge requests found
import numpy as np
import sys
def extract_txt_to_numpy_array(file, sf):
    """Read fixed-point integers (one per line) and return them as floats.

    Each line is parsed as an integer and divided by 2**sf to undo the
    fixed-point scaling.

    Args:
        file: path to a text file containing one integer per line.
        sf: scaling-factor exponent used when the values were encoded.

    Returns:
        np.ndarray of dtype float32 with one entry per input line.
    """
    # 'with' guarantees the handle is closed even if a line fails to parse
    # (the original open/close pair leaked the handle on exceptions).
    with open(file, 'r') as f:
        op = [float(int(line.rstrip())) / (2 ** sf) for line in f]
    return np.array(op, dtype=np.float32)
def extract_float_txt_to_numpy_array(file):
    """Read floating-point values (one per line) into a float32 numpy array.

    Args:
        file: path to a text file containing one float per line.

    Returns:
        np.ndarray of dtype float32 with one entry per input line.
    """
    # 'with' guarantees the handle is closed even if a line fails to parse
    # (the original open/close pair leaked the handle on exceptions).
    with open(file, 'r') as f:
        op = [float(line.rstrip()) for line in f]
    return np.array(op, dtype=np.float32)
if __name__ == "__main__":
    # CLI: compare a floating-point reference dump against a fixed-point dump.
    if len(sys.argv) != 5:
        print("Usage: compare_output.py floating_point.txt fixed_point.txt SCALING_FACTOR PRECISION")
        # Explicit non-zero exit instead of `assert`, which is stripped
        # when Python runs with -O and would let a bad invocation continue.
        sys.exit(1)
    sf = int(sys.argv[3])
    inp1 = extract_float_txt_to_numpy_array(sys.argv[1])
    inp2 = extract_txt_to_numpy_array(sys.argv[2], sf)
    prec = int(sys.argv[4])
    # Raises AssertionError (non-zero exit status) if any element differs
    # beyond `prec` decimal places — callers key off the exit code.
    np.testing.assert_almost_equal(inp1, inp2, decimal=prec)
# Usage: tf_output.float(floatingpt) party0_output(fixedpt) SCALING_FACTOR PRECISION(upto how many points to compare?)
# This first converts unsigned fixedpt to signed
SCRIPT_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
echo "Comparing output with tensorflow output upto $4 decimal points."
# Quote all expansions so paths containing spaces survive word splitting.
"$SCRIPT_DIR/convert_to_signed.sh" "$2"
#Then runs the comparison script on it.
# Branch directly on the command's status instead of testing $? afterwards,
# which is fragile if another command sneaks in between.
if python3 "$SCRIPT_DIR/compare_output.py" "$1" "${2}_signed" "$3" "$4"; then
    echo "Output matches upto ${4} decimal points"
else
    echo "Output does not match upto ${4} decimal points"
fi
import sys
if __name__ == '__main__':
    # CLI: <script> <file> <input_scale> <output_scale>
    # Re-encodes fixed-point integers from scale 2**input_scale to
    # 2**output_scale, writing to "<file>_<output_scale>".
    assert(len(sys.argv) == 4)
    file_name = sys.argv[1]
    input_scale = int(sys.argv[2])
    output_scale = int(sys.argv[3])
    output_file_name = file_name + '_' + str(output_scale)
    # Both files managed by `with` — the original left the output handle
    # unclosed if any input line failed to parse.
    with open(file_name, "r") as in_file, open(output_file_name, "w") as out_file:
        for line in in_file:
            # Decode at the input scale, then re-encode at the output scale,
            # truncating toward zero exactly as the original int() cast did.
            output = int((float(int(line.strip())) / (2 ** input_scale)) * (2 ** output_scale))
            out_file.write(str(output) + '\n')
import sys
if __name__ == "__main__":
    # CLI: <script> <input_file> <output_file>
    # Reinterprets unsigned 64-bit integers (one per line) as signed
    # two's-complement values.
    assert(len(sys.argv) == 3)
    inp_fname = sys.argv[1]
    out_fname = sys.argv[2]
    # `with` closes both files even if parsing or writing raises; the
    # original open/close pairs leaked handles on exceptions.
    with open(inp_fname, 'r') as f:
        op = [int(line.rstrip()) for line in f]
    with open(out_fname, 'w') as f:
        for i in op:
            # Values >= 2**63 encode negatives in two's complement.
            f.write(str(i if (i < 2 ** 63) else i - 2 ** 64) + '\n')
# Filters non-numeric lines out of $1, then converts the remaining unsigned
# fixed-point values to signed, producing ${1}_signed.
SCRIPT_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
# Quote all expansions so filenames containing spaces survive word splitting.
inp1="$1"
temp_1="${inp1}_tmp_cmp"
# awk numeric self-comparison: keeps only lines that parse as plain numbers.
awk '$0==($0+0)' "$inp1" > "$temp_1"
python3 "${SCRIPT_DIR}/convert_to_signed.py" "$temp_1" "${inp1}_signed"
rm "$temp_1"
import tensorflow as tf
import numpy as np
import argparse
from tf_graph_io import *
from tf_graph_trans import *
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'TFCompiler'))
import DumpTFMtData
from os import path
def check_operation_exists(graph, tensor_name):
    """Return True iff an operation named `tensor_name` exists in `graph`."""
    # Short-circuits on the first match instead of materializing the
    # full list of op names.
    return any(op.name == tensor_name for op in graph.get_operations())
def numpy_float_array_to_float_val_str(input_array):
chunk = ''
for val in np.nditer(input_array):
chunk += str(val) + '\n'
return chunk
def compile(model_fname, input_t_name, output_t_name, input_np_arr, output_fname):
    """Run one inference of a preprocessed frozen TF graph and dump the output.

    Validates that the model is an 'mpc_processed_*.pb' graph, feeds it the
    numpy array loaded from `input_np_arr`, and writes the resulting output
    tensor to `output_fname` as text (one float per line).

    Args:
        model_fname: path to the processed frozen graph (.pb, mpc_processed_ prefix).
        input_t_name: op name of the input tensor (no ':0' suffix).
        output_t_name: op name of the output tensor (no ':0' suffix).
        input_np_arr: path to a .npy file holding the input tensor.
        output_fname: path the textual prediction is written to.
    """
    if not model_fname.endswith('.pb'):
        sys.exit("Please supply a valid tensorflow protobuf model (.pb extension)")
    elif not "mpc_processed_" in model_fname:
        sys.exit("""Please process model using preprocess_frozen_tf_graph.py.
This will optimise it and generate a new .pb with mpc_processed prefix.
Use that with this script.""")
    else:
        # Strip the trailing '.pb' to get the bare model name.
        model_name = os.path.basename(model_fname)[:-3]
        print("Loading processed tf graph ", model_fname)
        graph = load_pb(model_fname)
        # Fail fast with a clear message if either tensor name is wrong.
        if not check_operation_exists(graph, input_t_name):
            sys.exit(input_t_name + " input does not exist in the graph")
        if not check_operation_exists(graph, output_t_name):
            sys.exit(output_t_name + " output does not exist in the graph")
        if not os.path.isfile(input_np_arr):
            sys.exit(input_np_arr + " file does not exist.")
        input_t = graph.get_operation_by_name(input_t_name).outputs[0]
        output_t = graph.get_operation_by_name(output_t_name).outputs[0]
        # NOTE(review): allow_pickle=True means the .npy must come from a
        # trusted source — pickled payloads can execute arbitrary code.
        np_input_t = np.load(input_np_arr, allow_pickle=True)
        feed_dict = {input_t: np_input_t}
        with graph.as_default():
            with tf.Session() as sess:
                # Run initializers generated by preprocessing
                if check_operation_exists(graph, 'init_constvars'):
                    sess.run(graph.get_operation_by_name('init_constvars'))
                else:
                    sess.run(tf.global_variables_initializer())
                # chdir to the model's directory so any relative paths the
                # graph reads resolve next to the model file. NOTE(review):
                # this also makes a relative `output_fname` land there.
                model_dir = os.path.realpath(os.path.dirname(model_fname))
                os.chdir(model_dir)
                output = sess.run(output_t, feed_dict)
        with open(output_fname, 'w') as f:
            f.write(numpy_float_array_to_float_val_str(output))
def boolean_string(s):
    """argparse type: parse exactly 'True'/'False' into a bool.

    Raises:
        ValueError: for any other string (including 'true'/'false').
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
def parse_args():
    """Parse the required CLI flags for running a processed model.

    Returns:
        argparse.Namespace with modelName, inputTensorName, outputTensorName,
        inputTensorNumpyArr and outputFileName attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--modelName", required=True, type=str, help="Name of processed tensorflow model (mpc_processed*.pb)")
    parser.add_argument("--inputTensorName", required=True, type=str, help="Name of the input tensor for the model. (Op name, dont add '/:0' suffix)")
    # Fixed copy-paste bug: this help text previously said "input tensor".
    parser.add_argument("--outputTensorName", required=True, type=str, help="Name of the output tensor for the model. (Op name, dont add '/:0' suffix)")
    parser.add_argument("--inputTensorNumpyArr", required=True, type=str, help="Name of the input tensor numpy array file for the model.")
    parser.add_argument("--outputFileName", required=True, type=str, help="Name of the output file to store the prediction.")
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # CLI entry point: parse the flags and run the model once, dumping the
    # prediction to the requested output file.
    args = parse_args()
    compile(args.modelName, args.inputTensorName, args.outputTensorName, args.inputTensorNumpyArr, args.outputFileName)
......@@ -163,6 +163,8 @@ def infer(savePreTrainedWeightsInt, savePreTrainedWeightsFloat, scalingFac, runP
print("*************** Done Prediction****************")
duration = end_time - start_time
print("Time taken in prediction : ", duration)
with open('ResNet_tf_pred.float','w+') as f:
f.write(DumpTFMtData.numpy_float_array_to_float_val_str(predictions))
trainVarsName = []
for node in optimized_graph_def.node:
......@@ -175,7 +177,6 @@ def infer(savePreTrainedWeightsInt, savePreTrainedWeightsFloat, scalingFac, runP
DumpTFMtData.dumpTrainedWeightsFloat(sess, trainVars, 'ResNet_weights_float.inp', 'w')
if saveImgAndWtData:
DumpTFMtData.dumpImgAndWeightsDataSeparate(sess, images[0], trainVars, 'ResNet_img.inp', 'ResNet_weights.inp', scalingFac)
return predictions
def parseArgs():
......
......@@ -87,7 +87,6 @@ def updateWeightsForBN(optimized_graph_def, sess, feed_dict={}):
for node in graphDef.node:
if (node.op == 'FusedBatchNorm' or node.op == 'FusedBatchNormV3'):
print("Updating BN weight, node.name = {0}".format(node.name))
gamma = graph.get_operation_by_name(node.input[1]).outputs[0]
beta = graph.get_operation_by_name(node.input[2]).outputs[0]
mu = graph.get_operation_by_name(node.input[3]).outputs[0]
......@@ -140,3 +139,8 @@ def dumpImgAndWeightsDataSeparate(sess, imgData, evalTensors, imgFileName, weigh
dumpImageDataInt(imgData, imgFileName, scalingFac, 'w')
dumpTrainedWeightsInt(sess, evalTensors, weightFileName, scalingFac, 'w', alreadyEvaluated=alreadyEvaluated)
def numpy_float_array_to_float_val_str(input_array):
chunk = ''
for val in numpy.nditer(input_array):
chunk += str(val) + '\n'
return chunk
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment