diff --git a/Athos/tests/conftest.py b/Athos/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..38bf615780f0c7877ceefa785a92162fe0bbe707
--- /dev/null
+++ b/Athos/tests/conftest.py
@@ -0,0 +1,65 @@
+import pytest
+import tempfile
+import shutil
+import os
+
+
+def pytest_addoption(parser):
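+    """Register the --backend CLI option, e.g. `pytest --backend 2PC_OT`."""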
+    parser.addoption(
+        "--backend",
+        action="store",
+        default="CPP",
+        help="backend : CPP | 2PC_HE | 2PC_OT | 3PC",
+    )
+
+
+@pytest.fixture(scope="session")
+def backend(request):
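+    """Return the validated --backend option, falling back to CPP if invalid."""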
+    opt = request.config.getoption("--backend")
+    if opt not in ["CPP", "3PC", "2PC_HE", "2PC_OT"]:
+        opt = "CPP"
+    return opt
+
+
+@pytest.fixture(scope="session", autouse=True)
+def test_env():
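+    """Create the session-wide root directory that holds all per-test dirs."""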
+    config = {}
+    test_dir = "cryptflow_tests"
+    path = os.path.join(tempfile.gettempdir(), test_dir)
+    if os.path.exists(path):
+        shutil.rmtree(path, ignore_errors=True)
+    os.mkdir(path)
+    config["test_dir"] = path
+    return config
+
+
+def make_dir(path):
+    # Clear out any stale directory, then always recreate it fresh.
+    if os.path.exists(path):
+        shutil.rmtree(path, ignore_errors=True)
+    os.mkdir(path)
+    return
+
+
+# Hook to check if test failed
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    # execute all other hooks to obtain the report object
+    outcome = yield
+    rep = outcome.get_result()
+    # set a report attribute for each phase of a call, which can
+    # be "setup", "call", "teardown"
+    setattr(item, "rep_" + rep.when, rep)
+
+
+@pytest.fixture
+def test_dir(request, test_env):
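+    """Yield a fresh scratch directory for the current test."""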
+    test_name = request.node.name[len("test_") :]
+    main_test_dir = test_env["test_dir"]
+    test_dir = os.path.join(main_test_dir, "athos_test_" + test_name)
+    make_dir(test_dir)
+    yield test_dir
+    # Remove dir only if the test passed; rep_call is absent when setup failed
+    rep_call = getattr(request.node, "rep_call", None)
+    if rep_call is not None and not rep_call.failed:
+        shutil.rmtree(test_dir, ignore_errors=True)
+    return
diff --git a/Athos/tests/pytest.ini b/Athos/tests/pytest.ini
new file mode 100644
index 0000000000000000000000000000000000000000..1ceab9429575185002f0eea7d903b7e81d06cae7
--- /dev/null
+++ b/Athos/tests/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
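+# Disable pytest's warnings plugin; the TF compat.v1 APIs these tests use are noisy.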
+addopts = -p no:warnings
diff --git a/Athos/tests/pytest_coverage_tf.config b/Athos/tests/pytest_coverage_tf.config
new file mode 100644
index 0000000000000000000000000000000000000000..e896bccf86948c065a6b8f2f4b989167a234cf0b
--- /dev/null
+++ b/Athos/tests/pytest_coverage_tf.config
@@ -0,0 +1,12 @@
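+# Branch coverage for the SeeDot and TFCompiler sources; assumed usage:
+#   coverage run --rcfile=pytest_coverage_tf.config -m pytest && coverage html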
+[run]
+branch = True
+source = 
+	../SeeDot
+	../TFCompiler
+
+[report]
+exclude_lines =
+	if __name__ == .__main__.:
+
+[html]
+directory = coverage_html_report
diff --git a/Athos/tests/tf/unittests/test_arith_binops.py b/Athos/tests/tf/unittests/test_arith_binops.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad5bd415516e6d6874356756c6bc005a72ca11b
--- /dev/null
+++ b/Athos/tests/tf/unittests/test_arith_binops.py
@@ -0,0 +1,175 @@
+import tensorflow as tf
+import numpy as np
+
+import pytest
+
+import sys
+import os
+
+# Athos DIR
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+from tests.utils import Config, Compiler, assert_almost_equal
+
+
+@pytest.mark.parametrize(
+    "a_shape,b_shape,dtype",
+    [
+        ((4, 4), (4, 4), np.single),  # Normal
+        ((2, 2), (1,), np.single),  # Broadcasting
+        ((3, 1, 2, 1), (2, 1, 4), np.single),  # Broadcasting
+        ((2, 2), (), np.single),  # Constant
+    ],
+)
+@pytest.mark.parametrize(
+    "tfOp", [tf.math.add, tf.math.subtract, tf.math.multiply, tf.raw_ops.AddV2]
+)
+def test_arith_binop(test_dir, backend, tfOp, a_shape, b_shape, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    b_inp = dtype(np.random.randn(*b_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        b = tf.constant(b_inp, name="b")
+        output = tfOp(x=a, y=b, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, b_shape, data_format, dtype",
+    [
+        ([4, 1, 4], [4], None, np.single),  # Normal
+        ([4, 1, 4], [4], "N..C", np.single),  # Same as above
+        pytest.param(
+            [4, 4, 1],
+            [4],
+            "NC..",
+            np.single,
+            marks=pytest.mark.skip(reason="[bias_add] NC.. not supported"),
+        ),  # Normal
+    ],
+)
+def test_bias_add(test_dir, backend, a_shape, b_shape, data_format, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    b_inp = dtype(np.random.randn(*b_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        b = tf.constant(b_inp, name="b")
+        output = tf.nn.bias_add(value=a, bias=b, data_format=data_format, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.parametrize(
+    "tfOp, a_val, divisor",
+    [
+        pytest.param(tf.divide, [7, -7], 5, marks=pytest.mark.skip(reason="[divide] Support for parsing DOUBLES")),  # [1.4, -1.4]
+        (tf.divide, [7.0, -7.0], 5.0),  # [1.4, -1.4]
+        pytest.param(tf.truediv, [7, -7], 5, marks=pytest.mark.skip(reason="[divide] Support for parsing DOUBLES")),  # [1.4, -1.4]
+        (tf.truediv, [7.0], 5.0),  # [1.4]
+        (tf.divide, 7.0, 5.0),  # 1.4
+        pytest.param(tf.floordiv, [7, -7], 5, marks=pytest.mark.skip(reason="[divide] Add support for converting div by constant into a mul")),  # [1, -2]
+        pytest.param(tf.floordiv, [7.0, -7.0], 5.0, marks=pytest.mark.skip(reason="[divide] Add support for converting div by constant into a mul")),  # [1.0, -2.0]
+        pytest.param(tf.truncatediv, -7, 5, marks=pytest.mark.skip(reason="[divide] Truncated div not supported")),  # -1
+    ],
+)
+def test_div(test_dir, backend, tfOp, a_val, divisor, dtype):
+    graph = tf.Graph()
+    a_inp = np.array(a_val)
+    with graph.as_default():
+        b = tf.constant(divisor, name="b")
+        a = tf.compat.v1.placeholder(tf.as_dtype(b.dtype), shape=a_inp.shape, name="a")
+        output = tfOp(a, b, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, b_shape, transpose_a, transpose_b, bisModel",
+    [
+        ([3, 2], [2, 3], False, False, True),
+        pytest.param(
+            [3, 2],
+            [2, 3],
+            False,
+            False,
+            False,
+            marks=pytest.mark.skip(
+                reason="[matmul] expect atleast one param to belong to model"
+            ),
+        ),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_matmul(
+    test_dir, backend, a_shape, b_shape, transpose_a, transpose_b, bisModel, dtype
+):
+    if backend == "2PC_HE":
+        pytest.skip(
+            "Assertion error in 2PC_HE FCField::matrix_multiplication Assertion `num_cols == 1' failed."
+        )
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    b_inp = dtype(np.random.randn(*b_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        if bisModel:
+            b = tf.constant(b_inp, name="b")
+        else:
+            b = tf.compat.v1.placeholder(
+                tf.as_dtype(dtype), shape=b_inp.shape, name="b"
+            )
+        output = tf.matmul(a, b, transpose_a, transpose_b, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        feed_dict = {a: a_inp}
+        if not bisModel:
+            feed_dict[b] = b_inp
+        expected_output = sess.run(output, feed_dict=feed_dict)
+    config = Config(backend).add_input(a).add_output(output)
+    if not bisModel:
+        config.add_input(b)
+    config.config["scale"] = 12
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp] if bisModel else [a_inp, b_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a, b",
+    [
+        ([1.2, 1.3], [1.2, 1.3]),
+        ([1.2, 1.3], [1.2, 1.2]),
+        ([1.2, 1.3], [1.2]),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.skip(reason="[equal] Not able to cast boolean to int ezpc")
+def test_equal(test_dir, backend, a, b, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.array(a))
+    b_inp = dtype(np.array(b))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        b = tf.constant(b_inp, name="b")
+        output = tf.math.equal(a, b, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
diff --git a/Athos/tests/tf/unittests/test_batchnorm.py b/Athos/tests/tf/unittests/test_batchnorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b6f393d5f3ad6643e0adb6108188fb755d78ca7
--- /dev/null
+++ b/Athos/tests/tf/unittests/test_batchnorm.py
@@ -0,0 +1,48 @@
+import tensorflow as tf
+import numpy as np
+
+import pytest
+
+import sys
+import os
+
+# Athos DIR
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+from tests.utils import Config, Compiler, assert_almost_equal
+
+
+@pytest.mark.parametrize(
+    "a_shape, scale, offset, mean, variance",
+    [
+        ([1, 2, 2, 1], [1.5], [2.3], [0.5], [0.2]),
+        # ([1], 1.5, 2.3, 0.5, 0.2), ([], 1.5, 2.3, 0.5, 0.2)
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.parametrize("tfOp", [tf.raw_ops.FusedBatchNorm])
+@pytest.mark.skip(reason="[batch_norm] Test not complete")
+def test_fused_batch_norm(
+    test_dir, backend, tfOp, a_shape, scale, offset, mean, variance, dtype
+):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tfOp(
+            x=a,
+            scale=scale,
+            offset=offset,
+            mean=mean,
+            variance=variance,
+            is_training=False,
+            name="output",
+        )
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+    assert expected_output is not None
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
diff --git a/Athos/tests/tf/unittests/test_convolution.py b/Athos/tests/tf/unittests/test_convolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cb24479848000d69c422f54264a1d6f79f6446e
--- /dev/null
+++ b/Athos/tests/tf/unittests/test_convolution.py
@@ -0,0 +1,90 @@
+import tensorflow as tf
+import numpy as np
+
+import pytest
+
+import sys
+import os
+
+# Athos DIR
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+from tests.utils import Config, Compiler, assert_almost_equal
+
+
+@pytest.mark.parametrize(
+    "tfOp, a_shape, kernel_shape, strides, padding",
+    [
+        (tf.nn.conv2d, [1, 5, 5, 1], [2, 2, 1, 2], [1, 1, 1, 1], "SAME"),
+        (tf.nn.conv2d, [1, 5, 5, 1], [2, 2, 1, 2], [1, 1, 1, 1], "VALID"),
+        (tf.nn.conv3d, [1, 5, 5, 5, 1], [2, 2, 2, 1, 2], [1, 1, 1, 1, 1], "SAME"),
+        (tf.nn.conv3d, [1, 5, 5, 5, 1], [2, 2, 2, 1, 2], [1, 1, 1, 1, 1], "VALID"),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_conv(test_dir, backend, tfOp, a_shape, kernel_shape, strides, padding, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    kernel_inp = dtype(np.random.randn(*kernel_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        filters = tf.constant(kernel_inp, name="filter")
+        output = tfOp(a, filters, strides, padding, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "tfOp, a_shape, kernel_shape, output_shape, strides, padding",
+    [
+        (
+            tf.nn.conv3d_transpose,
+            [1, 4, 4, 4, 2],
+            [2, 2, 2, 1, 2],
+            [1, 5, 5, 5, 1],
+            [1, 1, 1, 1, 1],
+            "VALID",
+        ),
+        pytest.param(
+            tf.nn.conv3d_transpose,
+            [1, 5, 5, 5, 2],
+            [2, 2, 2, 1, 2],
+            [1, 5, 5, 5, 1],
+            [1, 1, 1, 1, 1],
+            "SAME",
+            marks=pytest.mark.skip(reason="[conv3d_transpose] SAME padding bug"),
+        ),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_conv_transpose(
+    test_dir,
+    backend,
+    tfOp,
+    a_shape,
+    kernel_shape,
+    output_shape,
+    strides,
+    padding,
+    dtype,
+):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    kernel_inp = dtype(np.random.randn(*kernel_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        filters = tf.constant(kernel_inp, name="filter")
+        output = tfOp(a, filters, output_shape, strides, padding, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
diff --git a/Athos/tests/tf/unittests/test_non_linear.py b/Athos/tests/tf/unittests/test_non_linear.py
new file mode 100644
index 0000000000000000000000000000000000000000..356fe6e546bc939efbd00e31b1d3cc0a38beeb1b
--- /dev/null
+++ b/Athos/tests/tf/unittests/test_non_linear.py
@@ -0,0 +1,58 @@
+import tensorflow as tf
+import numpy as np
+
+import pytest
+
+import sys
+import os
+
+# Athos DIR
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+from tests.utils import Config, Compiler, assert_almost_equal
+
+
+@pytest.mark.skip(reason="[non-linear] Haven't made non-linear functionalities public")
+@pytest.mark.parametrize("a_shape", [(4, 4), (1,), ()])
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.parametrize(
+    "tfOp",
+    [
+        tf.math.sqrt,
+        tf.math.rsqrt,
+        tf.math.sigmoid,
+        tf.math.tanh,
+        tf.nn.relu,
+    ],
+)
+def test_non_linear(test_dir, backend, tfOp, a_shape, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tfOp(a, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+    assert expected_output is not None
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.skip(reason="[softmax] Haven't made non-linear functionalities public")
+@pytest.mark.parametrize("a_shape, axis", [((2, 3), 1), ((1,), 0)])
+@pytest.mark.parametrize("dtype", [np.single])
+def test_softmax(test_dir, backend, a_shape, axis, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.nn.softmax(a, axis=axis, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+    assert expected_output is not None
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
diff --git a/Athos/tests/tf/unittests/test_shape_manipulation.py b/Athos/tests/tf/unittests/test_shape_manipulation.py
new file mode 100644
index 0000000000000000000000000000000000000000..bba9e3aac08fe13f0e4a5e99890a6d374c9977f3
--- /dev/null
+++ b/Athos/tests/tf/unittests/test_shape_manipulation.py
@@ -0,0 +1,297 @@
+import tensorflow as tf
+import numpy as np
+
+import pytest
+
+import sys
+import os
+
+# Athos DIR
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+from tests.utils import Config, Compiler, assert_almost_equal
+
+
+@pytest.mark.parametrize(
+    "a_shape, out_shape",
+    [
+        ([2, 3], [6]),
+        ([6], [2, 3]),
+        ([2, 3], [3, 2]),
+        ([2, 3], [-1]),  # Flatten 1-D
+        pytest.param(
+            [1], [], marks=pytest.mark.skip(reason="[reshape] dumping weights error")
+        ),  # convert to scalar
+        ([3, 2, 3], [2, -1]),  # infer -1 as 9
+        ([3, 2, 3], [-1, 9]),  # infer -1 as 2
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_reshape(test_dir, backend, a_shape, out_shape, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.reshape(a, out_shape, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+    assert expected_output is not None
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, perm",
+    [([2, 3], [1, 0]), ([2, 4, 3], [0, 2, 1])],  # normal transpose, with perm
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_transpose(test_dir, backend, a_shape, perm, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.transpose(a, perm, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, num_or_size_splits, axis",
+    [
+        ([2, 10], 5, 1),
+        pytest.param(
+            [5, 7],
+            [1, 4, 2],
+            1,
+            marks=pytest.mark.skip(
+                reason="[split] don't support split into specific sizes (SplitV)"
+            ),
+        ),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_split(test_dir, backend, a_shape, num_or_size_splits, axis, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.split(a, num_or_size_splits, axis, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    if isinstance(output, list):
+        tf_output = output[-1]
+        tf_expected_output = expected_output[-1]
+    else:
+        tf_output = output
+        tf_expected_output = expected_output
+    config = Config(backend).add_input(a).add_output(tf_output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(
+        tf_output=tf_expected_output, mpc_tensor=mpc_output, precision=2
+    )
+    return
+
+
+# Squeeze
+# TODO: also add a squeeze dim example.
+@pytest.mark.parametrize(
+    "a_shape, axis",
+    [
+        pytest.param(
+            [1, 2, 1, 3, 1, 1],
+            None,
+            marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
+        ),
+        pytest.param(
+            [1, 2, 1, 3, 1, 1],
+            [2, 4],
+            marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
+        ),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_squeeze(test_dir, backend, a_shape, axis, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.squeeze(a, axis=axis, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, begin, size",
+    [
+        ([3, 2, 3], [1, 0, 0], [1, 1, 3]),
+        ([3, 2, 3], [1, 0, 0], [1, 2, 3]),
+        ([3, 2, 3], [1, 0, 0], [2, 1, 3]),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_slice(test_dir, backend, a_shape, begin, size, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.slice(a, begin, size, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, b_shape, axis",
+    [
+        ([2, 3], [3, 3], 0),
+        ([2, 3, 2, 1], [2, 6, 2, 1], 1),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_concat(test_dir, backend, a_shape, b_shape, axis, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    b_inp = dtype(np.random.randn(*b_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        b = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=b_inp.shape, name="b")
+        output = tf.concat([a, b], axis, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp, b: b_inp})
+
+    config = Config(backend).add_input(a).add_input(b).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp, b_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+# ExpandDims
+@pytest.mark.parametrize(
+    "a_shape, axis",
+    [
+        pytest.param(
+            [3, 2, 3], 1, marks=pytest.mark.skip(reason="[expand_dims] not supported")
+        ),
+        pytest.param(
+            [2, 5], 0, marks=pytest.mark.skip(reason="[expand_dims] not supported")
+        ),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_expand_dims(test_dir, backend, a_shape, axis, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.expand_dims(a, axis, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+# Pad
+@pytest.mark.parametrize(
+    "a_shape, paddings, mode, constant_values",
+    [
+        ([1, 2, 2, 1], [[1, 1], [1, 2], [1, 1], [1, 3]], "CONSTANT", 0),
+        pytest.param(
+            [1, 2, 2, 1],
+            [[1, 1], [1, 2], [1, 1], [1, 3]],
+            "REFLECT",
+            0,
+            marks=pytest.mark.skip(reason="[pad] REFLECT not supported"),
+        ),
+        pytest.param(
+            [1, 2, 2, 1],
+            [[1, 1], [1, 2], [1, 1], [1, 3]],
+            "SYMMETRIC",
+            0,
+            marks=pytest.mark.skip(reason="[pad] SYMMETRIC not supported"),
+        ),
+        pytest.param(
+            [2, 3],
+            [
+                [1, 1],
+                [2, 2],
+            ],
+            "CONSTANT",
+            0,
+            marks=pytest.mark.skip(reason="[pad] Generic pad not supported"),
+        ),
+        pytest.param(
+            [1, 2, 2, 1],
+            [[1, 1], [1, 2], [1, 1], [1, 3]],
+            "CONSTANT",
+            1.2,
+            marks=pytest.mark.skip(reason="[pad] non-zero padding not supported"),
+        ),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+def test_pad(test_dir, backend, a_shape, paddings, mode, constant_values, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        pad = tf.constant(paddings, name="paddings")
+        output = tf.pad(
+            a, pad, mode=mode, constant_values=constant_values, name="output"
+        )
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+# Tile
+@pytest.mark.parametrize(
+    "a_shape, multiples", [([2, 3], [1, 2]), ([2, 3], [2, 1]), ([2, 3], [2, 2])]
+)
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.skip(reason="[tile] Not supported")
+def test_tile(test_dir, backend, a_shape, multiples, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        mults = tf.constant(multiples, name="multiples")
+        output = tf.tile(a, mults, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
diff --git a/Athos/tests/tf/unittests/test_unaryops.py b/Athos/tests/tf/unittests/test_unaryops.py
new file mode 100644
index 0000000000000000000000000000000000000000..519eb539c90c87c89458a6c31e2c22bbc46d7a58
--- /dev/null
+++ b/Athos/tests/tf/unittests/test_unaryops.py
@@ -0,0 +1,194 @@
+import tensorflow as tf
+import numpy as np
+
+import pytest
+
+import sys
+import os
+
+# Athos DIR
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+from tests.utils import Config, Compiler, assert_almost_equal
+
+
+@pytest.mark.parametrize("a_shape", [[2, 2], []])
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.parametrize(
+    "tfOp",
+    [
+        tf.math.square,
+        tf.math.negative,
+        pytest.param(
+            tf.math.floor,
+            marks=pytest.mark.skip(reason="[floor] Floor1 not implemented"),
+        ),
+        tf.shape,
+        tf.identity,
+        pytest.param(
+            tf.zeros_like,
+            marks=pytest.mark.skip(reason="[zeros_like] EzPC issue for inp=[2,2]"),
+        ),
+    ],
+)
+def test_uop(test_dir, backend, tfOp, a_shape, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tfOp(a, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, axis, keepdims",
+    [
+        ([3, 2], None, False),
+        ([3, 2], [0, 1], False),
+        ([3, 2], 0, False),
+        ([3, 2], 1, False),
+        ([3, 2], 0, True),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.parametrize("tfOp", [tf.math.reduce_mean, tf.reduce_sum])
+@pytest.mark.skip(reason="[reduce] Reduce mean assert shape failure")
+def test_reduce(test_dir, backend, tfOp, a_shape, axis, keepdims, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tfOp(a, axis=axis, keepdims=keepdims, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize(
+    "a_shape, axis",
+    [
+        ([3, 2], None),
+        ([3, 2], 0),
+        ([3, 2], 1),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.skip(reason="[argmax] Generic argmax not implemented")
+def test_argmax(test_dir, backend, a_shape, axis, dtype):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tf.math.argmax(a, axis=axis, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+# NHWC is the default format
+@pytest.mark.parametrize(
+    "a_shape, ksize, strides, padding, data_format",
+    [
+        ([1, 5, 5, 1], [1, 2, 2, 1], [1, 2, 2, 1], "VALID", "NHWC"),
+        pytest.param(
+            [1, 5, 5, 1],
+            [1, 2, 2, 1],
+            [1, 2, 2, 1],
+            "SAME",
+            "NHWC",
+            marks=pytest.mark.skip(reason="[max/avg_pool] Pooling SAME pad bug"),
+        ),
+    ],
+)
+@pytest.mark.parametrize("dtype", [np.single])
+@pytest.mark.parametrize("tfOp", [tf.nn.max_pool, tf.nn.avg_pool])
+def test_pool(
+    test_dir, backend, tfOp, a_shape, ksize, strides, padding, data_format, dtype
+):
+    graph = tf.Graph()
+    a_inp = dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
+        output = tfOp(
+            a,
+            ksize=ksize,
+            strides=strides,
+            padding=padding,
+            data_format=data_format,
+            name="output",
+        )
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+# x = tf.constant([1.8, 2.2], dtype=tf.float32)
+# tf.dtypes.cast(x, tf.int32)
+# Currently cast acts as an identity operation.
+@pytest.mark.parametrize("a_shape", [[2, 2]])
+@pytest.mark.parametrize(
+    "from_dtype, to_dtype",
+    [
+        (np.single, np.single),
+        (np.double, np.single),
+        pytest.param(
+            np.single,
+            np.int32,
+            marks=pytest.mark.skip(reason="[cast] Only support identity cast"),
+        ),
+    ],
+)
+def test_cast(test_dir, backend, a_shape, from_dtype, to_dtype):
+    graph = tf.Graph()
+    a_inp = from_dtype(np.random.randn(*a_shape))
+    with graph.as_default():
+        a = tf.compat.v1.placeholder(
+            tf.as_dtype(from_dtype), shape=a_inp.shape, name="a"
+        )
+        output = tf.cast(a, to_dtype, name="output")
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output, feed_dict={a: a_inp})
+
+    config = Config(backend).add_input(a).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([a_inp])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
+
+
+@pytest.mark.parametrize("a_shape, value", [([2, 2], 9.2), ([], 9.2), ([2, 2], 1)])
+def test_fill(test_dir, backend, a_shape, value):
+    graph = tf.Graph()
+    with graph.as_default():
+        output = tf.fill(a_shape, value)
+    with tf.compat.v1.Session(graph=graph) as sess:
+        expected_output = sess.run(output)
+
+    config = Config(backend).add_output(output)
+    compiler = Compiler(graph, config, test_dir)
+    mpc_output = compiler.compile_and_run([])
+    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
+    return
diff --git a/Athos/tests/utils.py b/Athos/tests/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c678079b36155a23e8bb36c39a3537476433bce7
--- /dev/null
+++ b/Athos/tests/utils.py
@@ -0,0 +1,199 @@
+import tempfile
+import sys
+import os
+import shutil
+import re
+
+sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+import CompilerScripts.parse_config as parse_config
+import CompileTFGraph
+
+import numpy as np
+import subprocess
+import threading
+
+
+class Config:
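+    """Build the config dict that the Athos compiler scripts consume.
+
+    mode is one of CPP | 3PC | 2PC_OT | 2PC_HE, e.g.
+    Config("CPP").add_input(a).add_output(output)
+    """
+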
+    def __init__(self, mode):
+        self.config = {
+            "model_name": "model.pb",
+            "scale": 23,
+            "bitlength": 64,
+            "save_weights": True,
+        }
+        if mode == "CPP":
+            self.config["target"] = "CPP"
+        elif mode == "3PC":
+            self.config["target"] = "PORTHOS"
+        elif mode == "2PC_OT":
+            self.config["target"] = "PORTHOS2PC"
+            self.config["bitlength"] = 41
+            self.config["backend"] = "OT"
+
+        elif mode == "2PC_HE":
+            self.config["target"] = "PORTHOS2PC"
+            self.config["bitlength"] = 41
+            self.config["backend"] = "HE"
+        else:
+            assert False, "Mode has to be one of CPP/3PC/2PC_OT/2PC_HE"
+
+    def add_input(self, tensor_op):
+        input_name = tensor_op.op.name
+        shape = tensor_op.shape.as_list()
+        shape_string = ",".join(map(str, shape))
+        inputs = self.config.get("input_tensors")
+        if inputs is None:
+            self.config["input_tensors"] = {input_name: shape_string}
+        else:
+            self.config["input_tensors"][input_name] = shape_string
+        return self
+
+    def add_output(self, tensor_op):
+        output_name = tensor_op.op.name
+        outputs = self.config.get("output_tensors")
+        if outputs is None:
+            self.config["output_tensors"] = [output_name]
+        else:
+            self.config["output_tensors"].append(output_name)
+        return self
+
+
+def get_params(config):
+    return parse_config.parse_config(config)
+
+
+def make_dir(path):
+    # Clear out any stale directory, then always recreate it fresh.
+    if os.path.exists(path):
+        shutil.rmtree(path, ignore_errors=True)
+    os.mkdir(path)
+    return
+
+
+def save_graph(graph_def, config, test_dir):
+    fname = config["model_name"]
+    fpath = os.path.join(test_dir, fname)
+    with open(fpath, "wb") as f:
+        f.write(graph_def.SerializeToString())
+        print("\n\nfile  name: ", f.name, "\n\n\n")
+    config["model_name"] = fpath
+    return
+
+
+def convert_raw_output_to_np(filename, bitlength, scale):
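+    """Parse integers from the raw MPC output file, reinterpret each as a
+    signed two's-complement `bitlength`-bit value, and undo the 2**scale
+    fixed-point scaling."""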
+    matcher = re.compile(r"[-]?[0-9]+")
+    scaled_array = []
+    with open(filename, "r") as f:
+        for line in f:
+            match = matcher.fullmatch(line.rstrip())
+            if match:
+                unsigned_number = int(match.group(0))
+                number = (
+                    unsigned_number
+                    if (unsigned_number < 2 ** (bitlength - 1))
+                    else unsigned_number - 2 ** bitlength
+                )
+                scaled_array.append(float(number) / (2 ** scale))
+    return np.array(scaled_array)
+
+
+class Program:
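+    """Run a compiled backend binary on fixed-point inputs and decode its output."""
+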
+    def __init__(self, program_path, model_weight_path, params, test_dir):
+        self.program_path = program_path
+        self.model_weight_path = model_weight_path
+        self.scale = params["scale"]
+        self.bitlength = params["bitlength"]
+        self.target = params["target"]
+        self.test_dir = test_dir
+
+    def run(self, inputs):
+        # scale input and dump to file
+        inputs_scaled = os.path.join(
+            self.test_dir, "input_fixedpt_scale_" + str(self.scale) + ".inp"
+        )
+        with open(inputs_scaled, "w") as ff:
+            for i in inputs:
+                for xx in np.nditer(i, order="C"):
+                    ff.write(str(int(xx * (1 << self.scale))) + " ")
+                ff.write("\n")
+        raw_output = os.path.join(self.test_dir, "raw_output")
+        if self.target == "CPP":
+            os.system(
+                "cat {inputs} {weights} | {program} > {output}".format(
+                    program=self.program_path,
+                    inputs=inputs_scaled,
+                    weights=self.model_weight_path,
+                    output=raw_output,
+                )
+            )
+        elif self.target == "PORTHOS":
+            util_dir = os.path.dirname(os.path.abspath(__file__))
+            porthos_dir = os.path.join(util_dir, "..", "..", "Porthos")
+            ip_addr = os.path.join(porthos_dir, "files", "addresses")
+            keys_dir = os.path.join(porthos_dir, "files", "keys")
+            client_cmd = (
+                "{program} 0 {ip_addr_file} {keys_dir} < {input} > {output}".format(
+                    program=self.program_path,
+                    ip_addr_file=ip_addr,
+                    input=inputs_scaled,
+                    output=raw_output,
+                    keys_dir=keys_dir,
+                )
+            )
+            server_cmd = "{program} 1 {ip_addr_file} {keys_dir} < {input}".format(
+                program=self.program_path,
+                ip_addr_file=ip_addr,
+                input=self.model_weight_path,
+                keys_dir=keys_dir,
+            )
+            party2_cmd = "{program} 2 {ip_addr_file} {keys_dir}".format(
+                program=self.program_path, ip_addr_file=ip_addr, keys_dir=keys_dir
+            )
+            commands = [client_cmd, server_cmd, party2_cmd]
+            procs = [subprocess.Popen(i, shell=True) for i in commands]
+            for p in procs:
+                p.wait()
+        elif self.target == "PORTHOS2PC":
+            port = 1234
+            client_cmd = "{program} r=2 p={port} < {input} > {output}".format(
+                program=self.program_path,
+                port=port,
+                input=inputs_scaled,
+                output=raw_output,
+            )
+            server_cmd = "{program} r=1 p={port} < {input} > /dev/null".format(
+                program=self.program_path,
+                port=port,
+                input=self.model_weight_path,
+            )
+            commands = [client_cmd, server_cmd]
+            procs = [subprocess.Popen(i, shell=True) for i in commands]
+            for p in procs:
+                p.wait()
+        return convert_raw_output_to_np(raw_output, self.bitlength, self.scale)
+
+
+class Compiler:
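+    """Compile a TF graph with Athos for the configured backend and run it."""
+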
+    def __init__(self, graph, config, test_dir):
+        self.graph_def = graph.as_graph_def()
+        self.config = config.config
+        self.test_dir = test_dir
+
+    def compile_and_run(self, inputs):
+        save_graph(self.graph_def, self.config, self.test_dir)
+        params = get_params(self.config)
+        print(params)
+        (output_program, model_weight_file) = CompileTFGraph.generate_code(params)
+        prog = Program(output_program, model_weight_file, params, self.test_dir)
+        output = prog.run(inputs)
+        return output
+
+
+def assert_almost_equal(tf_output, mpc_tensor, precision):
+    if tf_output.shape == (0,):
+        return
+    np.testing.assert_almost_equal(tf_output.flatten(), mpc_tensor, decimal=precision)
+    return