diff --git a/CovidABM/README.md b/CovidABM/README.md
deleted file mode 100644
index 76414e28273685023816a8dfbd84fb6e3ec4d0d2..0000000000000000000000000000000000000000
--- a/CovidABM/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# CovidAlertVaccinationModel
diff --git a/CovidABM/src/CovidAlertVaccinationModel.jl b/CovidABM/src/CovidAlertVaccinationModel.jl
index 696dc24ce08c536f1158dd35928508cd1dcbde0b..2cbdd68033a9562aea5dc253231d0ae9ed6b0f47 100644
--- a/CovidABM/src/CovidAlertVaccinationModel.jl
+++ b/CovidABM/src/CovidAlertVaccinationModel.jl
@@ -1,5 +1,5 @@
 module CovidAlertVaccinationModel
-export get_u_0, solve!,get_parameters,plot_model,main,parse_cases_data,AgentModel,vaccination_rate_test,vaccinate_uniformly!
+# export get_u_0, solve!,get_parameters,plot_model,main,parse_cases_data,AgentModel,vaccination_rate_test,vaccinate_uniformly!
 using LightGraphs
 using RandomNumbers.Xorshifts
 using Random
@@ -48,7 +48,7 @@ function main()
     # @btime solve!($u_0,$get_parameters(),$steps,$agent_model,$vaccinate_uniformly!);
     sol,graphs = solve!(u_0,get_parameters(),steps,agent_model,vaccinate_uniformly!);
     return aggregate_timeseries(sol)
-    # plot_model_spatial_gif(agent_model.base_network,graphs,sol1)
+    plot_model_spatial_gif(agent_model.base_network,graphs,sol)
 end
 
 
diff --git a/CovidABM/src/workflows/CompatHelper.yml b/CovidABM/src/workflows/CompatHelper.yml
deleted file mode 100644
index cba9134c670f0708cf98c92f7fdef055a6c7f5d3..0000000000000000000000000000000000000000
--- a/CovidABM/src/workflows/CompatHelper.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-name: CompatHelper
-on:
-  schedule:
-    - cron: 0 0 * * *
-  workflow_dispatch:
-jobs:
-  CompatHelper:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Pkg.add("CompatHelper")
-        run: julia -e 'using Pkg; Pkg.add("CompatHelper")'
-      - name: CompatHelper.main()
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          COMPATHELPER_PRIV: ${{ secrets.DOCUMENTER_KEY }}
-        run: julia -e 'using CompatHelper; CompatHelper.main()'
diff --git a/CovidABM/src/workflows/TagBot.yml b/CovidABM/src/workflows/TagBot.yml
deleted file mode 100644
index f49313b662013f43aac7de2c738e1163a9715ff4..0000000000000000000000000000000000000000
--- a/CovidABM/src/workflows/TagBot.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: TagBot
-on:
-  issue_comment:
-    types:
-      - created
-  workflow_dispatch:
-jobs:
-  TagBot:
-    if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot'
-    runs-on: ubuntu-latest
-    steps:
-      - uses: JuliaRegistries/TagBot@v1
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          ssh: ${{ secrets.DOCUMENTER_KEY }}
diff --git a/CovidABM/Manifest.toml b/IntervalsModel/Manifest.toml
similarity index 88%
rename from CovidABM/Manifest.toml
rename to IntervalsModel/Manifest.toml
index cbf9bb04055aff9877322afa9e81de096751af6d..dcd5e02f5ca567557f54ee2dc877d2a084282b56 100644
--- a/CovidABM/Manifest.toml
+++ b/IntervalsModel/Manifest.toml
@@ -1,10 +1,22 @@
 # This file is machine-generated - editing it directly is not advised
 
+[[AbstractMCMC]]
+deps = ["BangBang", "ConsoleProgressMonitor", "Distributed", "Logging", "LoggingExtras", "ProgressLogging", "Random", "StatsBase", "TerminalLoggers", "Transducers"]
+git-tree-sha1 = "c8988bdf7cb820932be1c8bd59dcbb806279e36f"
+uuid = "80f14c24-f653-4e6a-9b94-39d6b0f70001"
+version = "2.2.1"
+
+[[AbstractTrees]]
+deps = ["Markdown"]
+git-tree-sha1 = "33e450545eaf7699da1a6e755f9ea65f14077a45"
+uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
+version = "0.3.3"
+
 [[Adapt]]
 deps = ["LinearAlgebra"]
-git-tree-sha1 = "345a14764e43fe927d6f5c250fe4c8e4664e6ee8"
+git-tree-sha1 = "ffcfa2d345aaee0ef3d8346a073d5dd03c983ebe"
 uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
-version = "2.4.0"
+version = "3.2.0"
 
 [[ArgCheck]]
 git-tree-sha1 = "dedbbb2ddb876f899585c4ec4433265e3017215a"
@@ -22,9 +34,9 @@ version = "0.1.0"
 
 [[ArrayInterface]]
 deps = ["IfElse", "LinearAlgebra", "Requires", "SparseArrays"]
-git-tree-sha1 = "b9c3166c0124f44135419a394f42912c14dcbd80"
+git-tree-sha1 = "ee07ae00e3cc277dcfa5507ce25be522313ecc3e"
 uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
-version = "3.0.1"
+version = "3.1.1"
 
 [[Artifacts]]
 uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
@@ -70,9 +82,9 @@ version = "0.9.2"
 
 [[ChainRulesCore]]
 deps = ["Compat", "LinearAlgebra", "SparseArrays"]
-git-tree-sha1 = "53fed426c9af1eb68e63b3999e96454c2db79757"
+git-tree-sha1 = "d3d0a4e0d5bc03a6c97f4d249c8a471fc20a2f33"
 uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
-version = "0.9.27"
+version = "0.9.28"
 
 [[ColorSchemes]]
 deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random", "StaticArrays"]
@@ -107,6 +119,12 @@ git-tree-sha1 = "f3955eb38944e5dd0fabf8ca1e267d94941d34a5"
 uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
 version = "0.1.0"
 
+[[ConsoleProgressMonitor]]
+deps = ["Logging", "ProgressMeter"]
+git-tree-sha1 = "3ab7b2136722890b9af903859afcf457fa3059e8"
+uuid = "88cd18e8-d9cc-4ea6-8889-5259c0d15c8b"
+version = "0.1.2"
+
 [[ConstructionBase]]
 git-tree-sha1 = "a2a6a5fea4d6f730ec4c18a76d27ec10e8ec1c50"
 uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
@@ -118,6 +136,12 @@ git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7"
 uuid = "d38c429a-6771-53c6-b99e-75d170b6e991"
 version = "0.5.7"
 
+[[CovidAlertVaccinationModel]]
+deps = ["BenchmarkTools", "CSV", "DataFrames", "Dates", "DelimitedFiles", "Distributions", "ImportAll", "Intervals", "LabelledArrays", "LightGraphs", "NamedTupleTools", "NetworkLayout", "Pipe", "Plots", "Random", "RandomNumbers", "StatsBase", "ThreadsX", "UnPack"]
+path = "../CovidABM"
+uuid = "9260c4ec-b5cf-4bc2-ad29-e1d23bf2bd6f"
+version = "0.1.0"
+
 [[Crayons]]
 git-tree-sha1 = "3f71217b538d7aaee0b69ab47d9b7724ca8afa0d"
 uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
@@ -255,9 +279,9 @@ version = "3.3.2+1"
 
 [[GR]]
 deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test", "UUIDs"]
-git-tree-sha1 = "b90b826782cb3ac5b7a7f41b3fd0113180257ed4"
+git-tree-sha1 = "aaebdf5588281c2902f499b49e67953f2b409c9c"
 uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71"
-version = "0.53.0"
+version = "0.54.0"
 
 [[GR_jll]]
 deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt_jll", "Zlib_jll", "libpng_jll"]
@@ -267,9 +291,9 @@ version = "0.53.0+0"
 
 [[GeometryBasics]]
 deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"]
-git-tree-sha1 = "f574945bcabe9805b78292216279c1be910168bb"
+git-tree-sha1 = "4d4f72691933d5b6ee1ff20e27a102c3ae99d123"
 uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326"
-version = "0.3.8"
+version = "0.3.9"
 
 [[Gettext_jll]]
 deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"]
@@ -289,10 +313,22 @@ uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe"
 version = "1.0.0"
 
 [[HTTP]]
-deps = ["Base64", "Dates", "IniFile", "MbedTLS", "Sockets"]
-git-tree-sha1 = "c7ec02c4c6a039a98a15f955462cd7aea5df4508"
+deps = ["Base64", "Dates", "IniFile", "MbedTLS", "Sockets", "URIs"]
+git-tree-sha1 = "942c1a9c750bbe79912b7bd060a420932afd35b8"
 uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3"
-version = "0.8.19"
+version = "0.9.3"
+
+[[Hwloc]]
+deps = ["Hwloc_jll"]
+git-tree-sha1 = "2e3d1d4ab0e7296354539b2be081f71f4b694c0b"
+uuid = "0e44f5e4-bd66-52a0-8798-143a42290a1d"
+version = "1.2.0"
+
+[[Hwloc_jll]]
+deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
+git-tree-sha1 = "1179250d910c99810d8a7ff55c50c4ed68c77a58"
+uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8"
+version = "2.4.0+0"
 
 [[IfElse]]
 git-tree-sha1 = "28e837ff3e7a6c3cdb252ce49fb412c8eb3caeef"
@@ -364,6 +400,12 @@ git-tree-sha1 = "9aff0587d9603ea0de2c6f6300d9f9492bbefbd3"
 uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8"
 version = "2.0.1+3"
 
+[[KissABC]]
+deps = ["AbstractMCMC", "Distributions", "MonteCarloMeasurements", "Random"]
+git-tree-sha1 = "693d4bd4cbb9ad6515414e0935bbd646b15fc16a"
+uuid = "9c9dad79-530a-4643-a18b-2704674d4108"
+version = "3.0.1"
+
 [[LAME_jll]]
 deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
 git-tree-sha1 = "df381151e871f41ee86cee4f5f6fd598b8a68826"
@@ -393,6 +435,12 @@ git-tree-sha1 = "3a0084cec7bf157edcb45a67fac0647f88fe5eaf"
 uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316"
 version = "0.14.7"
 
+[[LeftChildRightSiblingTrees]]
+deps = ["AbstractTrees"]
+git-tree-sha1 = "71be1eb5ad19cb4f61fa8c73395c0338fd092ae0"
+uuid = "1d6d02ad-be62-4b6b-8a6d-2f90e265016e"
+version = "0.1.2"
+
 [[LibCURL]]
 deps = ["LibCURL_jll", "MozillaCACerts_jll"]
 uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
@@ -479,6 +527,12 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 [[Logging]]
 uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
 
+[[LoggingExtras]]
+deps = ["Dates"]
+git-tree-sha1 = "95e675246b58a7cb1199a90cd50a25850ff215c7"
+uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36"
+version = "0.4.5"
+
 [[MacroTools]]
 deps = ["Markdown", "Random"]
 git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0"
@@ -525,6 +579,19 @@ git-tree-sha1 = "916b850daad0d46b8c71f65f719c49957e9513ed"
 uuid = "78c3b35d-d492-501b-9361-3d52fe80e533"
 version = "0.7.1"
 
+[[ModularIntervals]]
+git-tree-sha1 = "3ccfdb7165cb65620676e4f69865d1e4d169b667"
+repo-rev = "master"
+repo-url = "../ModularIntervals"
+uuid = "1e07f51d-1de3-4a1c-a782-ac336877d585"
+version = "0.1.0"
+
+[[MonteCarloMeasurements]]
+deps = ["Distributed", "Distributions", "LinearAlgebra", "MacroTools", "Random", "RecipesBase", "Requires", "SLEEFPirates", "StaticArrays", "Statistics", "StatsBase", "Test"]
+git-tree-sha1 = "b7afe11a705b925bacfd595d9cac092c8c0dd2fc"
+uuid = "0987c9cc-fe09-11e8-30f0-b96dd679fdca"
+version = "0.10.0"
+
 [[MozillaCACerts_jll]]
 uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
 
@@ -611,9 +678,9 @@ uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
 
 [[PlotThemes]]
 deps = ["PlotUtils", "Requires", "Statistics"]
-git-tree-sha1 = "c6f5ea535551b3b16835134697f0c65d06c94b91"
+git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d"
 uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a"
-version = "2.0.0"
+version = "2.0.1"
 
 [[PlotUtils]]
 deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"]
@@ -623,9 +690,9 @@ version = "1.0.10"
 
 [[Plots]]
 deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"]
-git-tree-sha1 = "3acf7ee21b0c0ea99ef0815e7768c1c0fde82629"
+git-tree-sha1 = "cab13323a50caf17432793269677b289234f02ca"
 uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
-version = "1.10.2"
+version = "1.10.4"
 
 [[PooledArrays]]
 deps = ["DataAPI"]
@@ -643,6 +710,18 @@ version = "0.11.0"
 deps = ["Unicode"]
 uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 
+[[ProgressLogging]]
+deps = ["Logging", "SHA", "UUIDs"]
+git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539"
+uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
+version = "0.1.4"
+
+[[ProgressMeter]]
+deps = ["Distributed", "Printf"]
+git-tree-sha1 = "45640774ee2efa24e52686dbdf895e88102e68fc"
+uuid = "92933f4c-e287-5a05-a399-4b506db050ca"
+version = "1.4.1"
+
 [[Qt_jll]]
 deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"]
 git-tree-sha1 = "7760cfea90bec61814e31dfb204fa4b81bba7b57"
@@ -676,9 +755,9 @@ version = "1.1.1"
 
 [[RecipesPipeline]]
 deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"]
-git-tree-sha1 = "9ea2f5bf1b26918b16e9f885bb8e05206bfc2144"
+git-tree-sha1 = "c4d54a78e287de7ec73bbc928ce5eb3c60f80b24"
 uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c"
-version = "0.2.1"
+version = "0.3.1"
 
 [[Reexport]]
 git-tree-sha1 = "57d8440b0c7d98fc4f889e478e80f268d534c9d5"
@@ -711,6 +790,12 @@ version = "0.2.2+1"
 [[SHA]]
 uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
 
+[[SLEEFPirates]]
+deps = ["IfElse", "Libdl", "VectorizationBase"]
+git-tree-sha1 = "ab6194c92dcf38036cd9513e4ab12cd76a613da1"
+uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa"
+version = "0.6.10"
+
 [[Scratch]]
 deps = ["Dates"]
 git-tree-sha1 = "ad4b278adb62d185bbcb6864dc24959ab0627bf6"
@@ -797,9 +882,9 @@ version = "0.9.6"
 
 [[StructArrays]]
 deps = ["Adapt", "DataAPI", "Tables"]
-git-tree-sha1 = "8099ed9fb90b6e754d6ba8c6ed8670f010eadca0"
+git-tree-sha1 = "26ea43b4be7e919a2390c3c0f824e7eb4fc19a0a"
 uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
-version = "0.4.4"
+version = "0.5.0"
 
 [[StructTypes]]
 deps = ["Dates", "UUIDs"]
@@ -823,14 +908,20 @@ version = "1.0.0"
 
 [[Tables]]
 deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"]
-git-tree-sha1 = "8dc2bb7d3548e315d890706547b24502ed79504f"
+git-tree-sha1 = "a716dde43d57fa537a19058d044b495301ba6565"
 uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
-version = "1.3.1"
+version = "1.3.2"
 
 [[Tar]]
 deps = ["ArgTools", "SHA"]
 uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
 
+[[TerminalLoggers]]
+deps = ["LeftChildRightSiblingTrees", "Logging", "Markdown", "Printf", "ProgressLogging", "UUIDs"]
+git-tree-sha1 = "e185a19bb9172f0cf5bc71233fab92a46f7ae154"
+uuid = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"
+version = "0.1.3"
+
 [[Test]]
 deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
 uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
@@ -853,6 +944,11 @@ git-tree-sha1 = "9550eba57ebc2f7677c4c946aaca56e149ca73ff"
 uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
 version = "0.4.59"
 
+[[URIs]]
+git-tree-sha1 = "7855809b88d7b16e9b029afd17880930626f54a2"
+uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
+version = "1.2.0"
+
 [[UUIDs]]
 deps = ["Random", "SHA"]
 uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
@@ -865,6 +961,12 @@ version = "1.0.2"
 [[Unicode]]
 uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
 
+[[VectorizationBase]]
+deps = ["ArrayInterface", "Hwloc", "IfElse", "Libdl", "LinearAlgebra"]
+git-tree-sha1 = "fff40362e3170e934afb15b9447e06812c6322e4"
+uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f"
+version = "0.18.8"
+
 [[Wayland_jll]]
 deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"]
 git-tree-sha1 = "dc643a9b774da1c2781413fd7b6dcd2c56bb8056"
diff --git a/IntervalsModel/Project.toml b/IntervalsModel/Project.toml
new file mode 100644
index 0000000000000000000000000000000000000000..dadafe079fa2edff18f47dbab5d30acb5657a22b
--- /dev/null
+++ b/IntervalsModel/Project.toml
@@ -0,0 +1,24 @@
+name = "IntervalsModel"
+uuid = "44cf8977-2b44-493c-b5ee-991d4e866cca"
+authors = ["pjentsch <pjentsch@uwaterloo.ca>"]
+version = "0.1.0"
+
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
+CovidAlertVaccinationModel = "9260c4ec-b5cf-4bc2-ad29-e1d23bf2bd6f"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+Intervals = "d8418881-c3e1-53bb-8760-2df7ec849ed5"
+KissABC = "9c9dad79-530a-4643-a18b-2704674d4108"
+ModularIntervals = "1e07f51d-1de3-4a1c-a782-ac336877d585"
+Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+RandomNumbers = "e6cf234a-135c-5ec9-84dd-332b85af5143"
+Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
+StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
+
+[extras]
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+
+[targets]
+test = ["Test"]
diff --git a/NetworkModel/network-data/.gitkeep b/IntervalsModel/network-data/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/.gitkeep
rename to IntervalsModel/network-data/.gitkeep
diff --git a/NetworkModel/network-data/HH/.gitkeep b/IntervalsModel/network-data/HH/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/HH/.gitkeep
rename to IntervalsModel/network-data/HH/.gitkeep
diff --git a/NetworkModel/network-data/POLYMOD/.gitkeep b/IntervalsModel/network-data/POLYMOD/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/POLYMOD/.gitkeep
rename to IntervalsModel/network-data/POLYMOD/.gitkeep
diff --git a/NetworkModel/network-data/POLYMOD/2008_Mossong_POLYMOD_contact_common.csv b/IntervalsModel/network-data/POLYMOD/2008_Mossong_POLYMOD_contact_common.csv
similarity index 100%
rename from NetworkModel/network-data/POLYMOD/2008_Mossong_POLYMOD_contact_common.csv
rename to IntervalsModel/network-data/POLYMOD/2008_Mossong_POLYMOD_contact_common.csv
diff --git a/NetworkModel/network-data/POLYMOD/2008_Mossong_POLYMOD_participant_common.csv b/IntervalsModel/network-data/POLYMOD/2008_Mossong_POLYMOD_participant_common.csv
similarity index 100%
rename from NetworkModel/network-data/POLYMOD/2008_Mossong_POLYMOD_participant_common.csv
rename to IntervalsModel/network-data/POLYMOD/2008_Mossong_POLYMOD_participant_common.csv
diff --git a/NetworkModel/network-data/POLYMOD/AALContact_data.csv b/IntervalsModel/network-data/POLYMOD/AALContact_data.csv
similarity index 100%
rename from NetworkModel/network-data/POLYMOD/AALContact_data.csv
rename to IntervalsModel/network-data/POLYMOD/AALContact_data.csv
diff --git a/NetworkModel/network-data/POLYMOD/Contact-distributions.ipynb b/IntervalsModel/network-data/POLYMOD/Contact-distributions.ipynb
similarity index 100%
rename from NetworkModel/network-data/POLYMOD/Contact-distributions.ipynb
rename to IntervalsModel/network-data/POLYMOD/Contact-distributions.ipynb
diff --git a/NetworkModel/network-data/POLYMOD/wsAlphas.csv b/IntervalsModel/network-data/POLYMOD/wsAlphas.csv
similarity index 100%
rename from NetworkModel/network-data/POLYMOD/wsAlphas.csv
rename to IntervalsModel/network-data/POLYMOD/wsAlphas.csv
diff --git a/NetworkModel/network-data/Prem/.gitkeep b/IntervalsModel/network-data/Prem/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/Prem/.gitkeep
rename to IntervalsModel/network-data/Prem/.gitkeep
diff --git a/NetworkModel/network-data/Prem/Canadian-Avgs.ipynb b/IntervalsModel/network-data/Prem/Canadian-Avgs.ipynb
similarity index 100%
rename from NetworkModel/network-data/Prem/Canadian-Avgs.ipynb
rename to IntervalsModel/network-data/Prem/Canadian-Avgs.ipynb
diff --git a/NetworkModel/network-data/Prem/Prem_Canada.csv b/IntervalsModel/network-data/Prem/Prem_Canada.csv
similarity index 100%
rename from NetworkModel/network-data/Prem/Prem_Canada.csv
rename to IntervalsModel/network-data/Prem/Prem_Canada.csv
diff --git a/NetworkModel/network-data/Prem/Prem_home.csv b/IntervalsModel/network-data/Prem/Prem_home.csv
similarity index 100%
rename from NetworkModel/network-data/Prem/Prem_home.csv
rename to IntervalsModel/network-data/Prem/Prem_home.csv
diff --git a/NetworkModel/network-data/Prem/Prem_rest.csv b/IntervalsModel/network-data/Prem/Prem_rest.csv
similarity index 100%
rename from NetworkModel/network-data/Prem/Prem_rest.csv
rename to IntervalsModel/network-data/Prem/Prem_rest.csv
diff --git a/NetworkModel/network-data/Prem/Prem_school.csv b/IntervalsModel/network-data/Prem/Prem_school.csv
similarity index 100%
rename from NetworkModel/network-data/Prem/Prem_school.csv
rename to IntervalsModel/network-data/Prem/Prem_school.csv
diff --git a/NetworkModel/network-data/Prem/Prem_work.csv b/IntervalsModel/network-data/Prem/Prem_work.csv
similarity index 100%
rename from NetworkModel/network-data/Prem/Prem_work.csv
rename to IntervalsModel/network-data/Prem/Prem_work.csv
diff --git a/NetworkModel/network-data/Prem/restMixing.csv b/IntervalsModel/network-data/Prem/restMixing.csv
similarity index 100%
rename from NetworkModel/network-data/Prem/restMixing.csv
rename to IntervalsModel/network-data/Prem/restMixing.csv
diff --git a/NetworkModel/network-data/Prem/wsMixing.csv b/IntervalsModel/network-data/Prem/wsMixing.csv
similarity index 100%
rename from NetworkModel/network-data/Prem/wsMixing.csv
rename to IntervalsModel/network-data/Prem/wsMixing.csv
diff --git a/NetworkModel/network-data/Timeuse/.gitkeep b/IntervalsModel/network-data/Timeuse/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/Timeuse/.gitkeep
rename to IntervalsModel/network-data/Timeuse/.gitkeep
diff --git a/NetworkModel/network-data/Timeuse/HH/.gitkeep b/IntervalsModel/network-data/Timeuse/HH/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/.gitkeep
rename to IntervalsModel/network-data/Timeuse/HH/.gitkeep
diff --git a/NetworkModel/network-data/Timeuse/HH/Duration-fitting.py b/IntervalsModel/network-data/Timeuse/HH/Duration-fitting.py
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/Duration-fitting.py
rename to IntervalsModel/network-data/Timeuse/HH/Duration-fitting.py
diff --git a/NetworkModel/network-data/Timeuse/HH/HHAggDur.py b/IntervalsModel/network-data/Timeuse/HH/HHAggDur.py
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/HHAggDur.py
rename to IntervalsModel/network-data/Timeuse/HH/HHAggDur.py
diff --git a/NetworkModel/network-data/Timeuse/HH/HHComp.csv b/IntervalsModel/network-data/Timeuse/HH/HHComp.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/HHComp.csv
rename to IntervalsModel/network-data/Timeuse/HH/HHComp.csv
diff --git a/NetworkModel/network-data/Timeuse/HH/HHComp.py b/IntervalsModel/network-data/Timeuse/HH/HHComp.py
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/HHComp.py
rename to IntervalsModel/network-data/Timeuse/HH/HHComp.py
diff --git a/NetworkModel/network-data/Timeuse/HH/HHYMO.csv b/IntervalsModel/network-data/Timeuse/HH/HHYMO.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/HHYMO.csv
rename to IntervalsModel/network-data/Timeuse/HH/HHYMO.csv
diff --git a/NetworkModel/network-data/Timeuse/HH/Intervals_Model.py b/IntervalsModel/network-data/Timeuse/HH/Intervals_Model.py
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/Intervals_Model.py
rename to IntervalsModel/network-data/Timeuse/HH/Intervals_Model.py
diff --git a/NetworkModel/network-data/Timeuse/HH/__pycache__/Intervals_Model.cpython-39.pyc b/IntervalsModel/network-data/Timeuse/HH/__pycache__/Intervals_Model.cpython-39.pyc
similarity index 100%
rename from NetworkModel/network-data/Timeuse/HH/__pycache__/Intervals_Model.cpython-39.pyc
rename to IntervalsModel/network-data/Timeuse/HH/__pycache__/Intervals_Model.cpython-39.pyc
diff --git a/NetworkModel/network-data/Timeuse/Rest/.gitkeep b/IntervalsModel/network-data/Timeuse/Rest/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/Timeuse/Rest/.gitkeep
rename to IntervalsModel/network-data/Timeuse/Rest/.gitkeep
diff --git a/NetworkModel/network-data/Timeuse/Rest/RData.py b/IntervalsModel/network-data/Timeuse/Rest/RData.py
similarity index 100%
rename from NetworkModel/network-data/Timeuse/Rest/RData.py
rename to IntervalsModel/network-data/Timeuse/Rest/RData.py
diff --git a/NetworkModel/network-data/Timeuse/Rest/RDataM.csv b/IntervalsModel/network-data/Timeuse/Rest/RDataM.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/Rest/RDataM.csv
rename to IntervalsModel/network-data/Timeuse/Rest/RDataM.csv
diff --git a/NetworkModel/network-data/Timeuse/Rest/RDataO.csv b/IntervalsModel/network-data/Timeuse/Rest/RDataO.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/Rest/RDataO.csv
rename to IntervalsModel/network-data/Timeuse/Rest/RDataO.csv
diff --git a/NetworkModel/network-data/Timeuse/Rest/RDataY.csv b/IntervalsModel/network-data/Timeuse/Rest/RDataY.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/Rest/RDataY.csv
rename to IntervalsModel/network-data/Timeuse/Rest/RDataY.csv
diff --git a/NetworkModel/network-data/Timeuse/WS/.gitkeep b/IntervalsModel/network-data/Timeuse/WS/.gitkeep
similarity index 100%
rename from NetworkModel/network-data/Timeuse/WS/.gitkeep
rename to IntervalsModel/network-data/Timeuse/WS/.gitkeep
diff --git a/NetworkModel/network-data/Timeuse/WS/WSData.py b/IntervalsModel/network-data/Timeuse/WS/WSData.py
similarity index 100%
rename from NetworkModel/network-data/Timeuse/WS/WSData.py
rename to IntervalsModel/network-data/Timeuse/WS/WSData.py
diff --git a/NetworkModel/network-data/Timeuse/WS/WorkschoolDataM.csv b/IntervalsModel/network-data/Timeuse/WS/WorkschoolDataM.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/WS/WorkschoolDataM.csv
rename to IntervalsModel/network-data/Timeuse/WS/WorkschoolDataM.csv
diff --git a/NetworkModel/network-data/Timeuse/WS/WorkschoolDataO.csv b/IntervalsModel/network-data/Timeuse/WS/WorkschoolDataO.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/WS/WorkschoolDataO.csv
rename to IntervalsModel/network-data/Timeuse/WS/WorkschoolDataO.csv
diff --git a/NetworkModel/network-data/Timeuse/WS/WorkschoolDataY.csv b/IntervalsModel/network-data/Timeuse/WS/WorkschoolDataY.csv
similarity index 100%
rename from NetworkModel/network-data/Timeuse/WS/WorkschoolDataY.csv
rename to IntervalsModel/network-data/Timeuse/WS/WorkschoolDataY.csv
diff --git a/NetworkModel/norm_dists.pdf b/IntervalsModel/norm_dists.pdf
similarity index 100%
rename from NetworkModel/norm_dists.pdf
rename to IntervalsModel/norm_dists.pdf
diff --git a/NetworkModel/pois_dists.pdf b/IntervalsModel/pois_dists.pdf
similarity index 100%
rename from NetworkModel/pois_dists.pdf
rename to IntervalsModel/pois_dists.pdf
diff --git a/IntervalsModel/src/IntervalsModel.jl b/IntervalsModel/src/IntervalsModel.jl
new file mode 100644
index 0000000000000000000000000000000000000000..8d1fa4f038a449b56aa4b2ec8eebd60020cb9c3a
--- /dev/null
+++ b/IntervalsModel/src/IntervalsModel.jl
@@ -0,0 +1,43 @@
+module IntervalsModel
+
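+# Fits parametric contact-duration distributions (normal and Poisson) to time-use survey data
+# by approximate Bayesian computation (ABC-SMC via KissABC), and plots the resulting posteriors.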
+using Intervals
+using CSV
+using DataFrames
+using RandomNumbers.Xorshifts
+using StatsBase
+using Distributions
+using CovidAlertVaccinationModel
+const PACKAGE_FOLDER = dirname(dirname(pathof(IntervalsModel)))
+const HHYMO = DataFrame(CSV.File("$PACKAGE_FOLDER/network-data/Timeuse/HH/HHYMO.csv"))
+const rng = Xoroshiro128Plus()
+const YOUNG, MIDDLE,OLD = 1,2,3
+const cnst = (
+    # Set the underlying parameters for the intervals model
+    Sparam = [60,12],
+    # Set parameters for intervals sample and subpopulation size
+    numsamples = 100,
+    subsize = size(HHYMO)[1],
+    durmax = 144,
+    # Swap age brackets for numbers
+    swap = Dict("Y" => YOUNG, "M" => MIDDLE, "O" => OLD),
+    # Total weight in survey
+    Wghttotal = sum(HHYMO[:,"WGHT_PER"]),
+
+    MUbounds = (6,12*6),
+    SIGMAbounds = (1,48),
+    ws_distributions = CovidAlertVaccinationModel.initial_workschool_mixing_matrix,
+    rest_distributions = CovidAlertVaccinationModel.initial_rest_mixing_matrix
+)
+
+include("data.jl")
+
+
+const dat = make_dat_array() #assign a constant data array
+
+
+include("interval_overlap_sampling.jl")
+include("hh_durations_model.jl")
+include("bayesian_estimation.jl")
+include("plots.jl")
+
+end # module
\ No newline at end of file
diff --git a/IntervalsModel/src/bayesian_estimation.jl b/IntervalsModel/src/bayesian_estimation.jl
new file mode 100644
index 0000000000000000000000000000000000000000..0df0dd85a8a2cfb0ed55e9fc75b5b9a0866c6258
--- /dev/null
+++ b/IntervalsModel/src/bayesian_estimation.jl
@@ -0,0 +1,28 @@
+
+
+using KissABC
+using BenchmarkTools
+using Serialization
+using Plots
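+# Fit the household-duration models by ABC sequential Monte Carlo (KissABC.smc): uniform priors
+# over the MU/SIGMA bounds in `cnst`, 2000 particles each for the normal and Poisson models,
+# with the posteriors serialized to norm.dat and pois.dat.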
+function bayesian_estimate_hh()
+
+# Set parameter bounds for fitting
+    BoundsNORM = vcat([cnst.MUbounds for i = 1:6], [cnst.SIGMAbounds for i = 1:6])
+
+    norm_init = vcat([cnst.MUbounds[1] for i = 1:6], [cnst.SIGMAbounds[1] for i = 1:6])
+    BoundsPOIS = [cnst.MUbounds for i in 1:6]
+    pois_init = [cnst.MUbounds[1] for i = 1:6]
+
+    priors_norm = Factored([Uniform(l,u) for (l,u) in BoundsNORM]...) #assume uniform priors
+    # @btime err_poisson($pois_init) #compute benchmark of the error function
+
+
+    out_norm = smc(priors_norm,err_norm, verbose=true, nparticles=2000, alpha=0.95, parallel = true) #apply sequential Monte Carlo ABC with 2000 particles
+    serialize("norm.dat",out_norm) #save output
+
+
+    priors_pois = Factored([Uniform(l,u) for (l,u) in BoundsPOIS]...)    
+    out_pois = smc(priors_pois,err_poisson, verbose=true, nparticles=2000, alpha=0.95, parallel = true) #apply sequential Monte Carlo ABC with 2000 particles
+
+    serialize("pois.dat",out_pois) #save output
+end
\ No newline at end of file
diff --git a/IntervalsModel/src/data.jl b/IntervalsModel/src/data.jl
new file mode 100644
index 0000000000000000000000000000000000000000..9e9f3273b05b037a52ff344fdf7b96637727cd46
--- /dev/null
+++ b/IntervalsModel/src/data.jl
@@ -0,0 +1,24 @@
+
+# This function applies pre-processing to the HHYMO data file, and splits it into a namedtuple, which should be faster to index.
+# In particular, we avoid having to modify any strings in the error function.
+function make_dat_array()
+    durs = hcat(
+        Int.(HHYMO[!,"YDUR"*string(cnst.Sparam[2])]),
+        Int.(HHYMO[!,"MDUR"*string(cnst.Sparam[2])]),
+        Int.(HHYMO[!,"ODUR"*string(cnst.Sparam[2])]),
+    )
+    nums = hcat(
+        Int.(HHYMO[!,"YNUM"]),
+        Int.(HHYMO[!,"MNUM"]),
+        Int.(HHYMO[!,"ONUM"]),
+    )
+
+    WGHT = Weights(HHYMO[!,"WGHT_PER"]./cnst.Wghttotal)
+    AGERESP = map(r -> cnst.swap[r],HHYMO[!,"AGERESP"])
+    return (;
+        nums,
+        durs,
+        WGHT,
+        AGERESP
+    )
+end
diff --git a/IntervalsModel/src/hh_durations_model.jl b/IntervalsModel/src/hh_durations_model.jl
new file mode 100644
index 0000000000000000000000000000000000000000..0f17e1e8fab525b8de2dcf83f7bfa69fe0edfa6c
--- /dev/null
+++ b/IntervalsModel/src/hh_durations_model.jl
@@ -0,0 +1,52 @@
+
+#error function for Normal distributions
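+# `params` packs the 6 independent entries of the symmetric 3x3 mean matrix followed by the 6
+# entries of the symmetric standard-deviation matrix (see as_symmetric_matrix). The error is the
+# squared difference between the simulated expected covered duration and the reported duration,
+# summed over survey respondents and contact-age groups.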
+function err_norm(params)
+    μ = as_symmetric_matrix(params[1:6])
+    σ = as_symmetric_matrix(params[7:12])
+
+    # the line below is commented out; uncomment it (and the @view lines further down)
+    # to go back to sampling subsets of the data
+    # row_ids = sample(rng,1:length(dat.WGHT), dat.WGHT,cnst.subsize)
+    age_dists = [Normal(μ[i,j],σ[i,j]) for i in YOUNG:OLD, j in YOUNG:OLD]
+    duration_subarray =  dat.durs#@view dat.durs[row_ids,:]
+    num_contacts_subarray = dat.nums#@view dat.nums[row_ids,:]
+
+    # display(num_contacts_subarray)
+    AGERESP =  dat.AGERESP #@view dat.AGERESP[row_ids]
+    errsum = 0
+    @inbounds for i = 1:cnst.subsize
+        age_sample = AGERESP[i]
+        @inbounds for age_j in YOUNG:OLD #for a given age_sample loop over possible contact ages
+            running_sum = 0
+            durs = trunc.(Int,rand(rng,age_dists[age_sample,age_j],num_contacts_subarray[i,age_j])) .% 144
+            expdur = tot_dur_sample(cnst.numsamples,cnst.Sparam,durs)
+            errsum += (expdur/cnst.numsamples - duration_subarray[i,age_j])^2 #accumulate the total squared error
+        end
+    end
+    return errsum/cnst.subsize #this division by a constant is not strictly necessary
+end
+
+
+#error function for poisson distributions
+function err_poisson(params) #error function for poisson
+    μ = as_symmetric_matrix(params)
+    # row_ids = sample(rng,1:length(dat.WGHT), dat.WGHT,cnst.subsize)
+    age_dists = [Poisson(μ[i,j]) for i in YOUNG:OLD, j in YOUNG:OLD]
+    duration_subarray =  dat.durs#@view dat.durs[row_ids,:]
+    num_contacts_subarray = dat.nums#@view dat.nums[row_ids,:]
+
+    # display(num_contacts_subarray)
+    AGERESP =  dat.AGERESP #@view dat.AGERESP[row_ids]
+
+    errsum = 0
+    @inbounds for i = 1:cnst.subsize
+        age_sample = AGERESP[i]
+        @inbounds for age_j in YOUNG:OLD
+            running_sum = 0
+            durs = trunc.(Int,rand(rng,age_dists[age_sample,age_j],num_contacts_subarray[i,age_j])) .% 144
+            expdur = tot_dur_sample(cnst.numsamples,cnst.Sparam,durs)
+            errsum += (expdur/cnst.numsamples - duration_subarray[i,age_j])^2
+        end
+    end
+    return errsum/cnst.subsize
+end
\ No newline at end of file
diff --git a/IntervalsModel/src/interval_overlap_sampling.jl b/IntervalsModel/src/interval_overlap_sampling.jl
new file mode 100644
index 0000000000000000000000000000000000000000..a137e9111fb1b0c2b6be893f77b0f5e6a7119ea9
--- /dev/null
+++ b/IntervalsModel/src/interval_overlap_sampling.jl
@@ -0,0 +1,42 @@
+
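+# Add to the interval list `cov` a new closed interval with start S_j and end E_j.
+# The E_j < S_j case is handled separately because start/end times wrap modulo cnst.durmax.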
+function coverage!(cov,S_j,E_j)
+    if E_j < S_j
+        push!(cov,Interval(0,E_j))
+        push!(cov,Interval(S_j,cnst.durmax))
+    else
+        push!(cov,Interval(S_j,E_j))
+    end
+end
+#compute the total duration of a sample of intervals
+function tot_dur_sample(n, dist,durlist)
+    if isempty(durlist)
+        return 0
+    end
+    total_dur= 0
+    numcontact = length(durlist)
+
+    int_list = Vector{Interval{Int,Closed,Closed}}()
+    sizehint!(int_list,numcontact*2)
+
+    start_matrix = trunc.(Int,(rand(rng,dist,(numcontact,n))))
+    @inbounds for i in 1:n  
+        empty!(int_list)
+        @inbounds for j in 1:numcontact
+            S_j = start_matrix[j,i] % cnst.durmax
+            E_j = (S_j + durlist[j]) % cnst.durmax
+            coverage!(int_list,S_j,E_j)
+        end
+        union!(int_list)
+        total_dur += mapreduce(Intervals.span,+,int_list)
+    end
+    return total_dur
+end
+
+
+function as_symmetric_matrix(l) #turn a length-6 vector l into a symmetric 3x3 matrix; there is probably a nicer way to do this
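+    # e.g. as_symmetric_matrix([1,2,3,4,5,6]) == [1 2 3; 2 4 5; 3 5 6]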
+    return [
+        l[1] l[2] l[3]
+        l[2] l[4] l[5]
+        l[3] l[5] l[6]
+    ]
+end
\ No newline at end of file
diff --git a/IntervalsModel/src/plots.jl b/IntervalsModel/src/plots.jl
new file mode 100644
index 0000000000000000000000000000000000000000..347989ca4b5400735f954c4acf2012d621c65128
--- /dev/null
+++ b/IntervalsModel/src/plots.jl
@@ -0,0 +1,71 @@
+
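+# Plot the fitted posteriors: per-parameter particle histograms (norm.png / pois.png) and the
+# implied duration distributions with 5%-95% quantile ribbons (norm_dists.pdf / pois_dists.pdf),
+# reading the serialized ABC output from norm.dat and pois.dat.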
+function plot_estimates()
+
+    estimate = deserialize("norm.dat")
+    p_list = []
+
+    for i in 1:length(estimate.P)
+        a = stephist(
+            estimate.P[i].particles;
+            normalize = true,
+            title = i <=6 ? "μ_$i" : "σ_$i" 
+        )
+        push!(p_list,a)
+    end
+    p = plot(p_list...)
+    savefig(p,"norm.png")
+
+    μ_estimate_as_array = as_symmetric_matrix(estimate.P[1:6])
+    σ_estimate_as_array = as_symmetric_matrix(estimate.P[7:12])
+    p_matrix = map(x -> plot(),σ_estimate_as_array)
+    for i in YOUNG:OLD, j in YOUNG:OLD
+        
+        dist = Normal.(μ_estimate_as_array[i,j].particles,σ_estimate_as_array[i,j].particles)
+        
+        data = [pdf.(dist,t) for t in 0.0:144.0]
+        mean_dat = median.(data)
+        err_down = quantile.(data,0.05)
+        err_up = quantile.(data,0.95)
+        p_matrix[i,j] = plot(0:144,mean_dat; ribbon = ( mean_dat .- err_down,err_up .- mean_dat),legend = false)
+
+    end
+    plot!(p_matrix[end,1]; legend = true)
+    p = plot(p_matrix..., size = (600,400))
+    savefig(p,"norm_dists.pdf")
+
+
+
+
+    estimate = deserialize("pois.dat")
+    p_list = []
+    for i in 1:length(estimate.P)
+        a = stephist(
+            estimate.P[i].particles;
+            normalize = true,
+            title = i <=6 ? "μ_$i" : "σ_$i" 
+        )
+        push!(p_list,a)
+    end
+    p = plot(p_list...)
+    savefig(p,"pois.png")
+
+    μ_estimate_as_array = as_symmetric_matrix(estimate.P[1:6])
+    p_matrix = map(x -> plot(),μ_estimate_as_array)
+    for i in YOUNG:OLD, j in YOUNG:OLD
+        
+        dist = Poisson.(μ_estimate_as_array[i,j].particles)
+        
+        data = [pdf.(dist,t) for t in 0.0:144.0]
+        mean_dat = median.(data)
+        err_down = quantile.(data,0.05)
+        err_up = quantile.(data,0.95)
+        p_matrix[i,j] = plot(0:144,mean_dat; ribbon = ( mean_dat .- err_down,err_up .- mean_dat),legend = false)
+
+    end
+    plot!(p_matrix[end,1]; legend = true)
+    p = plot(p_matrix..., size = (600,400))
+    savefig(p,"pois_dists.pdf")
+    
+
+
+end
\ No newline at end of file
diff --git a/IntervalsModel/src/ws_durations_model.jl b/IntervalsModel/src/ws_durations_model.jl
new file mode 100644
index 0000000000000000000000000000000000000000..12eb412043fff0d8e17d18fb44b75cdeed7e492c
--- /dev/null
+++ b/IntervalsModel/src/ws_durations_model.jl
@@ -0,0 +1,52 @@
+
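+# Work/school counterpart of hh_durations_model.jl; the error functions below are currently
+# identical to the household versions, and this file is not yet included from IntervalsModel.jl.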
+#error function for Normal distributions
+function err_norm(params)
+    μ = as_symmetric_matrix(params[1:6])
+    σ = as_symmetric_matrix(params[7:12])
+
+    # the line below is commented out; uncomment it (and the @view lines further down)
+    # to go back to sampling subsets of the data
+    # row_ids = sample(rng,1:length(dat.WGHT), dat.WGHT,cnst.subsize)
+    age_dists = [Normal(μ[i,j],σ[i,j]) for i in YOUNG:OLD, j in YOUNG:OLD]
+    duration_subarray =  dat.durs#@view dat.durs[row_ids,:]
+    num_contacts_subarray = dat.nums#@view dat.nums[row_ids,:]
+
+    # display(num_contacts_subarray)
+    AGERESP =  dat.AGERESP #@view dat.AGERESP[row_ids]
+    errsum = 0
+    @inbounds for i = 1:cnst.subsize
+        age_sample = AGERESP[i]
+        @inbounds for age_j in YOUNG:OLD #for a given age_sample loop over possible contact ages
+            running_sum = 0
+            durs = trunc.(Int,rand(rng,age_dists[age_sample,age_j],num_contacts_subarray[i,age_j])) .% 144
+            expdur = tot_dur_sample(cnst.numsamples,cnst.Sparam,durs)
+            errsum += (expdur/cnst.numsamples - duration_subarray[i,age_j])^2 #accumulate the total squared error
+        end
+    end
+    return errsum/cnst.subsize #this division by a constant is not strictly necessary
+end
+
+
+#error function for poisson distributions
+function err_poisson(params) #error function for poisson
+    μ = as_symmetric_matrix(params)
+    # row_ids = sample(rng,1:length(dat.WGHT), dat.WGHT,cnst.subsize)
+    age_dists = [Poisson(μ[i,j]) for i in YOUNG:OLD, j in YOUNG:OLD]
+    duration_subarray =  dat.durs#@view dat.durs[row_ids,:]
+    num_contacts_subarray = dat.nums#@view dat.nums[row_ids,:]
+
+    # display(num_contacts_subarray)
+    AGERESP =  dat.AGERESP #@view dat.AGERESP[row_ids]
+
+    errsum = 0
+    @inbounds for i = 1:cnst.subsize
+        age_sample = AGERESP[i]
+        @inbounds for age_j in YOUNG:OLD
+            running_sum = 0
+            durs = trunc.(Int,rand(rng,age_dists[age_sample,age_j],num_contacts_subarray[i,age_j])) .% 144
+            expdur = tot_dur_sample(cnst.numsamples,cnst.Sparam,durs)
+            errsum += (expdur/cnst.numsamples - duration_subarray[i,age_j])^2
+        end
+    end
+    return errsum/cnst.subsize
+end
\ No newline at end of file
diff --git a/NetworkModel/test/runtests.jl b/IntervalsModel/test/runtests.jl
similarity index 100%
rename from NetworkModel/test/runtests.jl
rename to IntervalsModel/test/runtests.jl
diff --git a/NetworkModel/Project.toml b/NetworkModel/Project.toml
deleted file mode 100644
index 593ad255028fa0dcfeb2ead107cc859822b71348..0000000000000000000000000000000000000000
--- a/NetworkModel/Project.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-name = "IntervalsModel"
-uuid = "44cf8977-2b44-493c-b5ee-991d4e866cca"
-authors = ["pjentsch <pjentsch@uwaterloo.ca>"]
-version = "0.1.0"
-
-
-
-[targets]
-test = ["Test"]
\ No newline at end of file
diff --git a/NetworkModel/src/IntervalsModel.jl b/NetworkModel/src/IntervalsModel.jl
deleted file mode 100644
index 3954f260c7d8e577db80395aa94e16adfaa0318c..0000000000000000000000000000000000000000
--- a/NetworkModel/src/IntervalsModel.jl
+++ /dev/null
@@ -1,5 +0,0 @@
-module IntervalsModel
-
-greet() = print("Hello World!")
-
-end # module
diff --git a/NetworkModel/src/intervals_model_but_fast.jl b/NetworkModel/src/intervals_model_but_fast.jl
deleted file mode 100644
index 146e0058ac67283e63a3a6dacc5365be2f689be2..0000000000000000000000000000000000000000
--- a/NetworkModel/src/intervals_model_but_fast.jl
+++ /dev/null
@@ -1,244 +0,0 @@
-module intervals_fast
-
-using Intervals
-using CSV
-using DataFrames
-using RandomNumbers.Xorshifts
-using StatsBase
-using Distributions
-import AxisKeys
-const HHYMO = DataFrame(CSV.File("/HH/HHYMO.csv"))
-const rng = Xoroshiro128Plus()
-const YOUNG, MIDDLE,OLD = 1,2,3
-const cnst = (
-    # Set the underlying parameters for the intervals model
-    Sparam = [60,12],
-    # Set parameters for intervals sample and subpopulation size
-    numsamples = 100,
-    subsize = size(HHYMO)[1],
-    durmax = 145,
-    # Swap age brackets for numbers
-    swap = Dict("Y" => YOUNG, "M" => MIDDLE, "O" => OLD),
-    # Total weight in survey
-    Wghttotal = sum(HHYMO[:,"WGHT_PER"]),
-
-    MUbounds = (6,12*6),
-    SIGMAbounds = (1,48),
-)
-
-
-# This function applies pre-processing to the HHYMO data file, and splits it into a namedtuple, which should be faster to index.
-# In particular, we avoid having to modify any strings in the error function.
-function make_dat_array()
-    durs = hcat(
-        Int.(HHYMO[!,"YDUR"*string(cnst.Sparam[2])]),
-        Int.(HHYMO[!,"MDUR"*string(cnst.Sparam[2])]),
-        Int.(HHYMO[!,"ODUR"*string(cnst.Sparam[2])]),
-    )
-    nums = hcat(
-        Int.(HHYMO[!,"YNUM"]),
-        Int.(HHYMO[!,"MNUM"]),
-        Int.(HHYMO[!,"ONUM"]),
-    )
-
-    WGHT = Weights(HHYMO[!,"WGHT_PER"]./cnst.Wghttotal)
-    AGERESP = map(r -> cnst.swap[r],HHYMO[!,"AGERESP"])
-    return (;
-        nums,
-        durs,
-        WGHT,
-        AGERESP
-    )
-end
-
-const dat = make_dat_array() #assign a constant data array
-
-#modify the list of intervals, cov, with a new interval defined by S_j, E_j
-#note that we need to handle the case where E_j < S_j seperately because our intervals operate modulo durmax-1
-#there is probably room for another 2x speedup here with an implementation of Union for modular intervals
-#that would avoid the dynamic allocation in each step, since we could preallocate a vector of Intervals with length(durlist)
-function coverage!(cov,S_j,E_j)
-    if E_j < S_j
-        push!(cov,Interval(0,E_j))
-        push!(cov,Interval(S_j,cnst.durmax-1))
-    else
-        push!(cov,Interval(S_j,E_j))
-    end
-end
-
-#compute the total duration of a sample of intervals
-function tot_dur_sample(n, dist,durlist)
-    if isempty(durlist)
-        return 0
-    end
-    total_dur = 0
-    numcontact = length(durlist)
-    for i in 1:n
-        cov1 = Vector{Interval{Int64, Closed, Closed}}()
-        for j in 1:numcontact
-            S_j = Int(trunc(rand(rng,dist))) % 144
-            E_j = (S_j + durlist[j])%(cnst.durmax-1)
-            coverage!(cov1,S_j,E_j)
-        end
-        union!(cov1)
-        total_dur += mapreduce(Intervals.span,+,cov1)
-    end
-    return total_dur
-end
-
-#error function for Normal distributions
-function err_norm(params)
-    μ = as_symmetric_matrix(params[1:6])
-    σ = as_symmetric_matrix(params[7:12])
-
-    # this line is commented out, in we want to go back to sampling subsets of the data
-    # this also applies to the @view lines
-    # row_ids = sample(rng,1:length(dat.WGHT), dat.WGHT,cnst.subsize)
-    age_dists = [Normal(μ[i,j],σ[i,j]) for i in YOUNG:OLD, j in YOUNG:OLD]
-    duration_subarray =  dat.durs#@view dat.durs[row_ids,:]
-    num_contacts_subarray = dat.nums#@view dat.nums[row_ids,:]
-
-    # display(num_contacts_subarray)
-    AGERESP =  dat.AGERESP #@view dat.AGERESP[row_ids]
-    errsum = 0
-    @inbounds for i = 1:cnst.subsize
-        age_sample = AGERESP[i]
-        @inbounds for age_j in YOUNG:OLD #for a given age_sample loop over possible contact ages
-            running_sum = 0
-            durs = Int.(trunc.(rand(rng,age_dists[age_sample,age_j],num_contacts_subarray[i,age_j]))) .% 144
-            expdur = tot_dur_sample(cnst.numsamples,cnst.Sparam,durs)
-            errsum += (expdur/cnst.numsamples - duration_subarray[i,age_j])^2 #compute total 
-        end
-    end
-    return errsum/cnst.subsize #this division not actually necessary
-end
-function as_symmetric_matrix(l) #turn a vector of length 6, l, into a symmetric 3x3 matrix, probably a nicer way to do this exists
-    return [
-        l[1] l[2] l[3]
-        l[2] l[4] l[5]
-        l[3] l[5] l[6]
-    ]
-end
-function err_poisson(params) #error function for poisson
-    μ = as_symmetric_matrix(params)
-    # row_ids = sample(rng,1:length(dat.WGHT), dat.WGHT,cnst.subsize)
-    age_dists = [Poisson(μ[i,j]) for i in YOUNG:OLD, j in YOUNG:OLD]
-    duration_subarray =  dat.durs#@view dat.durs[row_ids,:]
-    num_contacts_subarray = dat.nums#@view dat.nums[row_ids,:]
-
-    # display(num_contacts_subarray)
-    AGERESP =  dat.AGERESP #@view dat.AGERESP[row_ids]
-
-    errsum = 0
-    @inbounds for i = 1:cnst.subsize
-        age_sample = AGERESP[i]
-        @inbounds for age_j in YOUNG:OLD
-            running_sum = 0
-            durs = Int.(trunc.(rand(rng,age_dists[age_sample,age_j],num_contacts_subarray[i,age_j]))) .% 144
-            expdur = tot_dur_sample(cnst.numsamples,cnst.Sparam,durs)
-            errsum += (expdur/cnst.numsamples - duration_subarray[i,age_j])^2
-        end
-    end
-    return errsum/cnst.subsize
-end
-
-using KissABC
-using BenchmarkTools
-using Serialization
-using Plots
-function bayesian_estimate()
-
-# Set parameter bounds for fitting
-    BoundsNORM = vcat([cnst.MUbounds for i = 1:6], [cnst.SIGMAbounds for i = 1:6])
-
-    norm_init = vcat([cnst.MUbounds[1] for i = 1:6], [cnst.SIGMAbounds[1] for i = 1:6])
-    BoundsPOIS = [cnst.MUbounds for i in 1:6]
-    pois_init = [cnst.MUbounds[1] for i = 1:6]
-
-    priors_norm = Factored([Uniform(l,u) for (l,u) in BoundsNORM]...) #assume uniform priors
-    @btime err_norm($norm_init) #compute benchmark of the error function
-
-
-    out_norm = smc(priors_norm,err_norm, verbose=true, nparticles=200, alpha=0.95, parallel = true) #apply sequential monte carlo with 200 particles
-    serialize("norm.dat",out_norm) #save output
-
-
-    priors_pois = Factored([Uniform(l,u) for (l,u) in BoundsPOIS]...)    
-    out_pois = smc(priors_pois,err_poisson, verbose=true, nparticles=200, alpha=0.95, parallel = true)#apply sequential monte carlo with 200 particles
-
-    serialize("pois.dat",out_pois) #save output
-end
-
-function plot_estimates()
-
-    estimate = deserialize("norm.dat")
-    p_list = []
-
-    for i in 1:length(estimate.P)
-        a = stephist(
-            estimate.P[i].particles;
-            normalize = true,
-            title = i <=6 ? "μ_$i" : "σ_$i" 
-        )
-        push!(p_list,a)
-    end
-    p = plot(p_list...)
-    savefig(p,"norm.png")
-
-    μ_estimate_as_array = as_symmetric_matrix(estimate.P[1:6])
-    σ_estimate_as_array = as_symmetric_matrix(estimate.P[7:12])
-    p_matrix = map(x -> plot(),σ_estimate_as_array)
-    for i in YOUNG:OLD, j in YOUNG:OLD
-        
-        dist = Normal.(μ_estimate_as_array[i,j].particles,σ_estimate_as_array[i,j].particles)
-        
-        data = [pdf.(dist,i) for i in 0.0:144.0]
-        mean_dat = median.(data)
-        err_down = quantile.(data,0.05)
-        err_up = quantile.(data,0.95)
-        p_matrix[i,j] = plot(0:144,mean_dat; ribbon = ( mean_dat .- err_down,err_up .- mean_dat),legend = false)
-
-    end
-    plot!(p_matrix[end,1]; legend = true)
-    p = plot(p_matrix..., size = (600,400))
-    savefig(p,"norm_dists.pdf")
-
-
-
-
-    estimate = deserialize("pois.dat")
-    p_list = []
-    for i in 1:length(estimate.P)
-        a = stephist(
-            estimate.P[i].particles;
-            normalize = true,
-            title = i <=6 ? "μ_$i" : "σ_$i" 
-        )
-        push!(p_list,a)
-    end
-    p = plot(p_list...)
-    savefig(p,"pois.png")
-
-    μ_estimate_as_array = as_symmetric_matrix(estimate.P[1:6])
-    p_matrix = map(x -> plot(),μ_estimate_as_array)
-    for i in YOUNG:OLD, j in YOUNG:OLD
-        
-        dist = Poisson.(μ_estimate_as_array[i,j].particles)
-        
-        data = [pdf.(dist,i) for i in 0.0:144.0]
-        mean_dat = median.(data)
-        err_down = quantile.(data,0.05)
-        err_up = quantile.(data,0.95)
-        p_matrix[i,j] = plot(0:144,mean_dat; ribbon = ( mean_dat .- err_down,err_up .- mean_dat),legend = false)
-
-    end
-    plot!(p_matrix[end,1]; legend = true)
-    p = plot(p_matrix..., size = (600,400))
-    savefig(p,"pois_dists.pdf")
-    
-
-
-end
-
-
-end
\ No newline at end of file
diff --git a/NetworkModel/src/modular_intervals.jl b/NetworkModel/src/modular_intervals.jl
deleted file mode 100644
index 5bb51eaa2d4a8f167fc35c2a2d11109a89e24034..0000000000000000000000000000000000000000
--- a/NetworkModel/src/modular_intervals.jl
+++ /dev/null
@@ -1,12 +0,0 @@
-module ModularIntervals
-export ModularInterval
-
-import Intervals:Interval,merge
-
-struct Interval <: AbstractInterval
-
-
-
-
-
-end
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..61f5f7feaca66fe3fe90b7096129edda9bf6cf0d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,13 @@
+# CovidAlertVaccinationModel
+
+This repo contains two packages:
+
+* CovidAlertVaccinationModel (in CovidABM)
+
+* IntervalsModel
+
+CovidAlertVaccinationModel implements the agent-based model (ABM), and IntervalsModel fits the contact time-interval distributions. IntervalsModel depends on CovidAlertVaccinationModel because the mixing distributions are defined there, although the dependency arguably should point the other way.
+
+
+
+Note that the Julia 1.6 beta is required to compile this project; the mixing distributions trigger a compiler bug in earlier Julia versions.
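+
+As a minimal setup sketch (this is the standard Julia package workflow, not tooling specific to this repo), each package can be activated and instantiated from the repository root:
+
+```julia
+# run with a Julia 1.6 beta binary
+using Pkg
+Pkg.activate("IntervalsModel")   # or "CovidABM" for CovidAlertVaccinationModel
+Pkg.instantiate()                # install the declared dependencies
+```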
diff --git a/norm.dat b/norm.dat
new file mode 100644
index 0000000000000000000000000000000000000000..2ddc1225c6b292990c169d01c3b23f659176621a
Binary files /dev/null and b/norm.dat differ
diff --git a/norm.png b/norm.png
new file mode 100644
index 0000000000000000000000000000000000000000..c393441311b0424b862684ffb5150b63df656bf4
Binary files /dev/null and b/norm.png differ
diff --git a/norm_dists.pdf b/norm_dists.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..bb383752d90e5bfcd739f8ec148f3bd9edf13701
Binary files /dev/null and b/norm_dists.pdf differ
diff --git a/pois.dat b/pois.dat
new file mode 100644
index 0000000000000000000000000000000000000000..3adaceb18e8086b74a138e8cad8457561c6750a9
Binary files /dev/null and b/pois.dat differ
diff --git a/pois.png b/pois.png
new file mode 100644
index 0000000000000000000000000000000000000000..2133dc66a0903f6fcb49e0f152156ba9e918309b
Binary files /dev/null and b/pois.png differ
diff --git a/pois_dists.pdf b/pois_dists.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..ef1442c80f57facffdf978546141a35659405751
Binary files /dev/null and b/pois_dists.pdf differ