diff --git a/analyze/memcached_qps_scan.py b/analyze/memcached_qps_scan.py
index ae98f359c087b79d3fcb276bab03a4ae71f71384..48194dc8a016e8a2e419983807303d1885f4de1c 100644
--- a/analyze/memcached_qps_scan.py
+++ b/analyze/memcached_qps_scan.py
@@ -1,6 +1,7 @@
 import os
 import matplotlib.pyplot as plt
 from config import markers, colors
+import util
 
 experiments = [f for f in os.listdir('../data')
     if f.startswith('memcached_qps_scan') and len(os.listdir(os.path.join('../data', f))) > 0]
@@ -12,18 +13,23 @@ def parse_memcached_output(s):
     return (qps, latency_99th)
 
 def extract_qps_latency_exp(exp):
-    ret = ([], [])
+    ret = ([], [], [])
 
     for f in os.listdir(os.path.join('../data/', exp)):
         if not f[0].isdigit():
             continue
 
-        with open(os.path.join('../data/', exp, f), 'r') as f:
-            (qps, latency_99th) = parse_memcached_output(f.read())
-            if len(ret[0]) > 0 and qps < ret[0][-1]:
-                continue
-            ret[0].append(qps)
-            ret[1].append(latency_99th)
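+        # Aux repeat files are folded into the mean/stddev below; skip them here.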
+        if f.split('_')[-1].startswith('aux'):
+            continue
+
+        ((qps_mean, latency_99th_mean), (qps_stddev, latency_99th_stddev)) = \
+            util.extract_exp_avg_stddev(exp, f.replace('.txt', ''), parse_memcached_output)
+
+        ret[0].append(qps_mean)
+        ret[1].append(latency_99th_mean)
+        # We don't care about stddev in qps -- qps is the x axis
+        ret[2].append(latency_99th_stddev)
 
     return ret
 
@@ -60,10 +65,10 @@ def show_plot(threads, include_caladan = False):
         # but we half the QPS
         name = 'caladan (per-thread QPS w/ 2 threads)'
         data[name] = extract_qps_latency_exp('memcached_qps_scan.5.4.0-136-generic.caladan.t2.c160')
-        data[name] = ([qps / 2 for qps in data[name][0]], data[name][1])
+        data[name] = ([qps / 2 for qps in data[name][0]], data[name][1], data[name][2])
 
     for k in data:
-        plt.plot(data[k][0], data[k][1], label = k, marker = markers[k], color = colors[k])
+        plt.errorbar(data[k][0], data[k][1], yerr = data[k][2], capsize = 4, label = k, marker = markers[k], color = colors[k])
 
     plt.xlabel('QPS')
     plt.ylabel('Latency (99th percentile)')
diff --git a/analyze/util.py b/analyze/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..c08a71b460d80ee0552f937cc75d1e6a5a9e0ac5
--- /dev/null
+++ b/analyze/util.py
@@ -0,0 +1,57 @@
+import os
+import math
+
+def extract_exp_avg_stddev(exp, datapoint, parser):
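+    """Parse ``datapoint`` and every ``<datapoint>_aux<i>`` repeat run with
+    ``parser``; return ``(mean, stddev)`` (population stddev), computed
+    element-wise when the parser returns a tuple."""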
+    def my_parser(f):
+        p = os.path.join(os.getcwd(), '../data/', exp, f + '.txt')
+
+        if not os.path.exists(p):
+            return None
+        with open(p, 'r') as fi:
+            return parser(fi.read())
+
+    acc = []
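+    # Collect the base run first, then every consecutive _aux<i> repeat that exists.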
+    acc.append(my_parser(datapoint))
+    tmp = None
+    i = 0
+    while True:
+        tmp = my_parser('%s_aux%d' % (datapoint, i + 1))
+        if tmp is None:
+            break
+        acc.append(tmp)
+        i += 1
+
+    mean = None
+    stddev = None
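+    # Tuple-valued results: average and take the stddev per element.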
+    if isinstance(acc[0], tuple):
+        mean = [0 for _ in range(len(acc[0]))]
+        stddev = [0 for _ in range(len(acc[0]))]
+        for j in range(len(acc[0])):
+            for i in range(len(acc)):
+                mean[j] += acc[i][j]
+            mean[j] /= len(acc)
+            for i in range(len(acc)):
+                stddev[j] += pow(acc[i][j] - mean[j], 2)
+            stddev[j] /= len(acc)
+            stddev[j] = math.sqrt(stddev[j])
+
+        mean = tuple(mean)
+        stddev = tuple(stddev)
+    else:
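+        # Scalar results: a single mean and stddev over all runs.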
+        mean = 0
+        stddev = 0
+        for i in range(len(acc)):
+            mean += acc[i]
+        mean /= len(acc)
+        for i in range(len(acc)):
+            stddev += pow(acc[i] - mean, 2)
+        stddev /= len(acc)
+        stddev = math.sqrt(stddev)
+
+    return (mean, stddev)