diff --git a/Package.resolved b/Package.resolved
index 377c4a1ad0ebd31e3294fdd816f0b310345ad80f..7e9c86f8b5f2f359860755f981e8a5e5f42c141f 100644
--- a/Package.resolved
+++ b/Package.resolved
@@ -81,6 +81,15 @@
         "version" : "0.5.0"
       }
     },
+    {
+      "identity" : "swift-syntax",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-syntax.git",
+      "state" : {
+        "revision" : "64889f0c732f210a935a0ad7cda38f77f876262d",
+        "version" : "509.1.1"
+      }
+    },
     {
       "identity" : "swiftannoy",
       "kind" : "remoteSourceControl",
diff --git a/Package.swift b/Package.swift
index 45c096b03044fc263c12b71100c9ee81b984033a..8e1bc7b562003de446e255c0eb4c24c7e772f975 100644
--- a/Package.swift
+++ b/Package.swift
@@ -2,11 +2,12 @@
 // The swift-tools-version declares the minimum version of Swift required to build this package.
 
 import PackageDescription
+import CompilerPluginSupport
 
 let package = Package(
     name: "SwiftNLP",
     platforms: [
-            .macOS(.v13),
+            .macOS(.v13)
     ],
     products: [
         .library(
@@ -18,12 +19,21 @@ let package = Package(
         .package(url: "https://github.com/jkrukowski/SwiftFaiss.git", from: "0.0.7"),
         .package(url: "https://github.com/L1MeN9Yu/Elva", .upToNextMajor(from: "2.1.3")),
         .package(url: "https://github.com/JadenGeller/similarity-topology", .upToNextMajor(from: "0.1.14")),
+        .package(url: "https://github.com/apple/swift-syntax", from: "509.0.0")
     ],
     targets: [
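+        // Macro plug-in providing the generic LLM macros used by the SwiftNLP target.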
+        .macro(
+            name: "SwiftNLPGenericLLMMacros",
+            dependencies: [
+                .product(name: "SwiftSyntaxMacros", package: "swift-syntax"),
+                .product(name: "SwiftCompilerPlugin", package: "swift-syntax")
+            ]
+        ),
         .target(
             name: "SwiftNLP",
             dependencies: [
                 //"SwiftAnnoy",
+                "SwiftNLPGenericLLMMacros",
                 .product(name: "HNSWAlgorithm", package: "similarity-topology"),
                 .product(name: "HNSWEphemeral", package: "similarity-topology"),
                 .product(name: "ZSTD", package: "Elva"),
diff --git a/Sources/SwiftNLP/2. Encoding/CoreMLEncoder.swift b/Sources/SwiftNLP/2. Encoding/CoreMLEncoder.swift
index 7871e3a8f68e432aa2dc537761653f60f7c61cc9..ab8c2c1a4aea75051733caef25e492d2231482b0 100644
--- a/Sources/SwiftNLP/2. Encoding/CoreMLEncoder.swift	
+++ b/Sources/SwiftNLP/2. Encoding/CoreMLEncoder.swift	
@@ -38,18 +38,25 @@ import CoreML
 // get another coreml available model and test both
 
 
-class CoreMLEncoder<Scalar: BinaryFloatingPoint & Codable>: SNLPEncoder {
+@freestanding(expression)
+public macro MODEL_MAKE_PREDICTION(_ model_type: Any) = #externalMacro(
+    module: "SwiftNLPGenericLLMMacros",
+    type: "LLMModelPredictionCases")
+
 
-    var zeroes: [Scalar] 
+class CoreMLEncoder<Scalar: BinaryFloatingPoint & Codable>: SNLPEncoder {
+    
+    var zeroes: [Scalar]
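+    /// Name of the Core ML model to run (e.g. "all_MiniLM_L6_v2" or "float32_model",
+    /// matching the models registered in GenericModel.swift).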
+    var model: String
         
     func encodeToken(_ token: String) -> [Scalar] {
-        let tokenization = MiniLMEmbeddings().tokenizer.tokenizeToIds(text: token) as! [Scalar]
+        let tokenization = MiniLMEmbeddings(model_type: self.model).tokenizer.tokenizeToIds(text: token) as! [Scalar]
         return tokenization
     }
     
     func encodeSentence(_ sentence: String) -> [Scalar] {
         let encoding = Task {
-            await MiniLMEmbeddings().encode(sentence: sentence)
+            await MiniLMEmbeddings(model_type: self.model).encode(sentence: sentence)
         } as! [Scalar]
         return encoding
     }
@@ -59,46 +66,48 @@ class CoreMLEncoder<Scalar: BinaryFloatingPoint & Codable>: SNLPEncoder {
 @available(macOS 13.0, *)
 public class MiniLMEmbeddings {
     
-    private let model: LLMModel
+    private let model: String
     public let tokenizer: BertTokenizer
     public let inputDimention: Int = 128
     public let outputDimention: Int = 384
 
-    public init() {
+    public init(model_type: String) {
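+        // `model_type` selects which Core ML model this embedder runs.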
         let modelConfig = MLModelConfiguration()
         modelConfig.computeUnits = .all
 
-        do {
-            self.model = try LLMModel<all_MiniLM_L6_v2>()
-        } catch {
-            fatalError("Failed to load the Core ML model. Error: \(error.localizedDescription)")
-        }
-
+        self.model = model_type
         self.tokenizer = BertTokenizer(maxLen: self.inputDimention)
     }
 
      // MARK: - Dense Embeddings
 
     public func encode(sentence: String) async -> [Float]? {
         // Encode input text as bert tokens
         let inputTokens = tokenizer.buildModelTokens(sentence: sentence)
         let (inputIds, attentionMask) = tokenizer.buildModelInputs(from: inputTokens)
 
         print(inputIds.count, attentionMask.count)
 
         // Send tokens through the MLModel
         let embeddings = generateEmbeddings(inputIds: inputIds, attentionMask: attentionMask)
 
         print(inputIds.count, attentionMask.count)
 
         return embeddings
     }
 
     public func generateEmbeddings(inputIds: MLMultiArray, attentionMask: MLMultiArray) -> [Float]? {
-        let output : LLMModelOutput? = try? model.prediction(input: LLMModelInput(input_ids: inputIds, attention_mask: attentionMask))
-        guard let embeddings = output?.embeddings else {
-            return nil
+        // let input_class: () = #MODEL_INPUT("input_ids: inputIds, attention_mask: attentionMask")
+
+        var output: MLMultiArray? = nil
+
+        #MODEL_MAKE_PREDICTION("input_ids: inputIds, attention_mask: attentionMask")
+
+        guard let embeddings = output else {
+            return nil
         }
        
         var embeddingsArray = [Float]()
         for index in 0..<embeddings.count {
diff --git a/Sources/SwiftNLP/2. Encoding/GenericModel.swift b/Sources/SwiftNLP/2. Encoding/GenericModel.swift
new file mode 100644
index 0000000000000000000000000000000000000000..601ded3519ac48c80a3de13530bae5698fe3c80a
--- /dev/null
+++ b/Sources/SwiftNLP/2. Encoding/GenericModel.swift	
@@ -0,0 +1,13 @@
+import CoreML
+
+
+@freestanding(declaration, names: arbitrary)
+public macro MODEL_PREDICTION_FUNCTIONS(_ model_type: Any) = #externalMacro(
+    module: "SwiftNLPGenericLLMMacros",
+    type: "LLMPredictionFunctions")
+
+
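+// Each expansion below is assumed to declare the prediction members (model wrapper,
+// input/output types, and a prediction function) for the named Core ML model, e.g.
+// the float32_model.mlmodelc bundle under Sources/SwiftNLP/Models.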
+struct LLMModel {
+    #MODEL_PREDICTION_FUNCTIONS("all_MiniLM_L6_v2")
+    #MODEL_PREDICTION_FUNCTIONS("float32_model")
+}
diff --git a/Sources/SwiftNLP/Models/float32_model.mlmodelc/analytics/coremldata.bin b/Sources/SwiftNLP/Models/float32_model.mlmodelc/analytics/coremldata.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8ba8d463c4dcdb85db8524a058f730f7f5fe5307
Binary files /dev/null and b/Sources/SwiftNLP/Models/float32_model.mlmodelc/analytics/coremldata.bin differ
diff --git a/Sources/SwiftNLP/Models/float32_model.mlmodelc/coremldata.bin b/Sources/SwiftNLP/Models/float32_model.mlmodelc/coremldata.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a5f7878d99d8b11a852842e3cbffb2a20e622057
Binary files /dev/null and b/Sources/SwiftNLP/Models/float32_model.mlmodelc/coremldata.bin differ
diff --git a/Sources/SwiftNLP/Models/float32_model.mlmodelc/metadata.json b/Sources/SwiftNLP/Models/float32_model.mlmodelc/metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..e68a06a26ab46dde93b27942f06f9abae3afc5b6
--- /dev/null
+++ b/Sources/SwiftNLP/Models/float32_model.mlmodelc/metadata.json
@@ -0,0 +1,98 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "shortDescription" : "thenlper\/gte-small (feature-extraction)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 128 × 384)",
+        "shortDescription" : "Sequence of hidden-states at the output of the last layer of the model",
+        "shape" : "[1, 128, 384]",
+        "name" : "last_hidden_state",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 384)",
+        "shortDescription" : "Last layer hidden-state of the first token of the sequence",
+        "shape" : "[1, 384]",
+        "name" : "pooler_output",
+        "type" : "MultiArray"
+      }
+    ],
+    "storagePrecision" : "Float32",
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 6,
+    "mlProgramOperationTypeHistogram" : {
+      "Linear" : 73,
+      "Gelu" : 12,
+      "LayerNorm" : 25,
+      "SliceByIndex" : 1,
+      "Matmul" : 24,
+      "Sub" : 1,
+      "Tanh" : 1,
+      "Transpose" : 48,
+      "Softmax" : 12,
+      "Mul" : 13,
+      "Cast" : 1,
+      "Reshape" : 48,
+      "Add" : 38,
+      "ExpandDims" : 2,
+      "Gather" : 1
+    },
+    "computePrecision" : "Mixed (Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "12.0",
+      "tvOS" : "15.0",
+      "visionOS" : "1.0",
+      "watchOS" : "8.0",
+      "iOS" : "15.0",
+      "macCatalyst" : "15.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source" : "torch==2.1.0",
+      "com.github.apple.coremltools.version" : "7.1",
+      "transformers_version" : "4.28.1",
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "co.huggingface.exporters.architecture" : "BertModel",
+      "co.huggingface.exporters.name" : "thenlper\/gte-small",
+      "co.huggingface.exporters.framework" : "pytorch",
+      "co.huggingface.exporters.task" : "feature-extraction",
+      "co.huggingface.exporters.precision" : "float32"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1 × 128)",
+        "shortDescription" : "Indices of input sequence tokens in the vocabulary",
+        "shape" : "[1, 128]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1 × 128)",
+        "shortDescription" : "Mask to avoid performing attention on padding token indices (1 = not masked, 0 = masked)",
+        "shape" : "[1, 128]",
+        "name" : "attention_mask",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "float32_model",
+    "method" : "predict"
+  }
+]
\ No newline at end of file
diff --git a/Sources/SwiftNLP/Models/float32_model.mlmodelc/model.mil b/Sources/SwiftNLP/Models/float32_model.mlmodelc/model.mil
new file mode 100644
index 0000000000000000000000000000000000000000..f0c89a26e339b10c94771f51214454cfcbfa9871
--- /dev/null
+++ b/Sources/SwiftNLP/Models/float32_model.mlmodelc/model.mil
@@ -0,0 +1,710 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
+{
+    func main<ios15>(tensor<int32, [1, 128]> attention_mask, tensor<int32, [1, 128]> input_ids) {
+            tensor<fp32, [30522, 384]> model_embeddings_word_embeddings_weight = const()[name = tensor<string, []>("model_embeddings_word_embeddings_weight"), val = tensor<fp32, [30522, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+            tensor<fp32, [384]> model_embeddings_LayerNorm_bias = const()[name = tensor<string, []>("model_embeddings_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46881920)))];
+            tensor<fp32, [384]> model_embeddings_LayerNorm_weight = const()[name = tensor<string, []>("model_embeddings_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46883520)))];
+            tensor<fp32, [384]> model_encoder_layer_0_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_0_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46885120)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_0_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_0_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46886720)))];
+            tensor<fp32, [384]> model_encoder_layer_0_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_0_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(47476608)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_0_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_0_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(47478208)))];
+            tensor<fp32, [384]> model_encoder_layer_0_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_0_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(48068096)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_0_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_0_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(48069696)))];
+            tensor<fp32, [384]> model_encoder_layer_0_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_0_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(48659584)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_0_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_0_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(48661184)))];
+            tensor<fp32, [384]> model_encoder_layer_0_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_0_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49251072)))];
+            tensor<fp32, [384]> model_encoder_layer_0_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_0_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49252672)))];
+            tensor<fp32, [1536]> model_encoder_layer_0_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_0_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49254272)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_0_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_0_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(49260480)))];
+            tensor<fp32, [384]> model_encoder_layer_0_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_0_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51619840)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_0_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_0_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51621440)))];
+            tensor<fp32, [384]> model_encoder_layer_0_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_0_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(53980800)))];
+            tensor<fp32, [384]> model_encoder_layer_0_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_0_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(53982400)))];
+            tensor<fp32, [384]> model_encoder_layer_1_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_1_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(53984000)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_1_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_1_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(53985600)))];
+            tensor<fp32, [384]> model_encoder_layer_1_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_1_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54575488)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_1_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_1_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54577088)))];
+            tensor<fp32, [384]> model_encoder_layer_1_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_1_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55166976)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_1_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_1_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55168576)))];
+            tensor<fp32, [384]> model_encoder_layer_1_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_1_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55758464)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_1_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_1_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55760064)))];
+            tensor<fp32, [384]> model_encoder_layer_1_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_1_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56349952)))];
+            tensor<fp32, [384]> model_encoder_layer_1_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_1_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56351552)))];
+            tensor<fp32, [1536]> model_encoder_layer_1_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_1_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56353152)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_1_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_1_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56359360)))];
+            tensor<fp32, [384]> model_encoder_layer_1_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_1_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(58718720)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_1_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_1_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(58720320)))];
+            tensor<fp32, [384]> model_encoder_layer_1_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_1_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(61079680)))];
+            tensor<fp32, [384]> model_encoder_layer_1_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_1_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(61081280)))];
+            tensor<fp32, [384]> model_encoder_layer_2_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_2_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(61082880)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_2_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_2_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(61084480)))];
+            tensor<fp32, [384]> model_encoder_layer_2_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_2_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(61674368)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_2_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_2_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(61675968)))];
+            tensor<fp32, [384]> model_encoder_layer_2_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_2_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(62265856)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_2_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_2_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(62267456)))];
+            tensor<fp32, [384]> model_encoder_layer_2_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_2_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(62857344)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_2_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_2_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(62858944)))];
+            tensor<fp32, [384]> model_encoder_layer_2_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_2_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(63448832)))];
+            tensor<fp32, [384]> model_encoder_layer_2_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_2_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(63450432)))];
+            tensor<fp32, [1536]> model_encoder_layer_2_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_2_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(63452032)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_2_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_2_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(63458240)))];
+            tensor<fp32, [384]> model_encoder_layer_2_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_2_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(65817600)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_2_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_2_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(65819200)))];
+            tensor<fp32, [384]> model_encoder_layer_2_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_2_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(68178560)))];
+            tensor<fp32, [384]> model_encoder_layer_2_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_2_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(68180160)))];
+            tensor<fp32, [384]> model_encoder_layer_3_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_3_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(68181760)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_3_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_3_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(68183360)))];
+            tensor<fp32, [384]> model_encoder_layer_3_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_3_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(68773248)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_3_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_3_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(68774848)))];
+            tensor<fp32, [384]> model_encoder_layer_3_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_3_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(69364736)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_3_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_3_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(69366336)))];
+            tensor<fp32, [384]> model_encoder_layer_3_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_3_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(69956224)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_3_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_3_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(69957824)))];
+            tensor<fp32, [384]> model_encoder_layer_3_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_3_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(70547712)))];
+            tensor<fp32, [384]> model_encoder_layer_3_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_3_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(70549312)))];
+            tensor<fp32, [1536]> model_encoder_layer_3_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_3_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(70550912)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_3_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_3_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(70557120)))];
+            tensor<fp32, [384]> model_encoder_layer_3_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_3_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(72916480)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_3_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_3_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(72918080)))];
+            tensor<fp32, [384]> model_encoder_layer_3_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_3_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75277440)))];
+            tensor<fp32, [384]> model_encoder_layer_3_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_3_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75279040)))];
+            tensor<fp32, [384]> model_encoder_layer_4_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_4_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75280640)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_4_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_4_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75282240)))];
+            tensor<fp32, [384]> model_encoder_layer_4_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_4_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75872128)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_4_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_4_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75873728)))];
+            tensor<fp32, [384]> model_encoder_layer_4_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_4_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76463616)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_4_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_4_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76465216)))];
+            tensor<fp32, [384]> model_encoder_layer_4_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_4_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77055104)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_4_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_4_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77056704)))];
+            tensor<fp32, [384]> model_encoder_layer_4_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_4_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77646592)))];
+            tensor<fp32, [384]> model_encoder_layer_4_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_4_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77648192)))];
+            tensor<fp32, [1536]> model_encoder_layer_4_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_4_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77649792)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_4_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_4_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77656000)))];
+            tensor<fp32, [384]> model_encoder_layer_4_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_4_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(80015360)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_4_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_4_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(80016960)))];
+            tensor<fp32, [384]> model_encoder_layer_4_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_4_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82376320)))];
+            tensor<fp32, [384]> model_encoder_layer_4_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_4_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82377920)))];
+            tensor<fp32, [384]> model_encoder_layer_5_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_5_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82379520)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_5_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_5_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82381120)))];
+            tensor<fp32, [384]> model_encoder_layer_5_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_5_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82971008)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_5_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_5_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82972608)))];
+            tensor<fp32, [384]> model_encoder_layer_5_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_5_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(83562496)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_5_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_5_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(83564096)))];
+            tensor<fp32, [384]> model_encoder_layer_5_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_5_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84153984)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_5_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_5_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84155584)))];
+            tensor<fp32, [384]> model_encoder_layer_5_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_5_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84745472)))];
+            tensor<fp32, [384]> model_encoder_layer_5_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_5_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84747072)))];
+            tensor<fp32, [1536]> model_encoder_layer_5_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_5_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84748672)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_5_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_5_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84754880)))];
+            tensor<fp32, [384]> model_encoder_layer_5_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_5_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87114240)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_5_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_5_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87115840)))];
+            tensor<fp32, [384]> model_encoder_layer_5_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_5_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89475200)))];
+            tensor<fp32, [384]> model_encoder_layer_5_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_5_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89476800)))];
+            tensor<fp32, [384]> model_encoder_layer_6_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_6_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89478400)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_6_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_6_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89480000)))];
+            tensor<fp32, [384]> model_encoder_layer_6_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_6_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90069888)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_6_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_6_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90071488)))];
+            tensor<fp32, [384]> model_encoder_layer_6_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_6_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90661376)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_6_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_6_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90662976)))];
+            tensor<fp32, [384]> model_encoder_layer_6_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_6_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91252864)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_6_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_6_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91254464)))];
+            tensor<fp32, [384]> model_encoder_layer_6_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_6_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91844352)))];
+            tensor<fp32, [384]> model_encoder_layer_6_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_6_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91845952)))];
+            tensor<fp32, [1536]> model_encoder_layer_6_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_6_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91847552)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_6_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_6_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91853760)))];
+            tensor<fp32, [384]> model_encoder_layer_6_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_6_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(94213120)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_6_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_6_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(94214720)))];
+            tensor<fp32, [384]> model_encoder_layer_6_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_6_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(96574080)))];
+            tensor<fp32, [384]> model_encoder_layer_6_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_6_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(96575680)))];
+            tensor<fp32, [384]> model_encoder_layer_7_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_7_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(96577280)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_7_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_7_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(96578880)))];
+            tensor<fp32, [384]> model_encoder_layer_7_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_7_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97168768)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_7_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_7_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97170368)))];
+            tensor<fp32, [384]> model_encoder_layer_7_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_7_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97760256)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_7_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_7_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97761856)))];
+            tensor<fp32, [384]> model_encoder_layer_7_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_7_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98351744)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_7_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_7_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98353344)))];
+            tensor<fp32, [384]> model_encoder_layer_7_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_7_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98943232)))];
+            tensor<fp32, [384]> model_encoder_layer_7_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_7_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98944832)))];
+            tensor<fp32, [1536]> model_encoder_layer_7_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_7_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98946432)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_7_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_7_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98952640)))];
+            tensor<fp32, [384]> model_encoder_layer_7_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_7_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101312000)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_7_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_7_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101313600)))];
+            tensor<fp32, [384]> model_encoder_layer_7_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_7_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103672960)))];
+            tensor<fp32, [384]> model_encoder_layer_7_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_7_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103674560)))];
+            tensor<fp32, [384]> model_encoder_layer_8_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_8_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103676160)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_8_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_8_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103677760)))];
+            tensor<fp32, [384]> model_encoder_layer_8_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_8_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104267648)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_8_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_8_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104269248)))];
+            tensor<fp32, [384]> model_encoder_layer_8_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_8_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104859136)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_8_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_8_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104860736)))];
+            tensor<fp32, [384]> model_encoder_layer_8_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_8_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(105450624)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_8_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_8_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(105452224)))];
+            tensor<fp32, [384]> model_encoder_layer_8_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_8_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(106042112)))];
+            tensor<fp32, [384]> model_encoder_layer_8_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_8_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(106043712)))];
+            tensor<fp32, [1536]> model_encoder_layer_8_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_8_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(106045312)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_8_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_8_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(106051520)))];
+            tensor<fp32, [384]> model_encoder_layer_8_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_8_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108410880)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_8_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_8_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108412480)))];
+            tensor<fp32, [384]> model_encoder_layer_8_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_8_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(110771840)))];
+            tensor<fp32, [384]> model_encoder_layer_8_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_8_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(110773440)))];
+            tensor<fp32, [384]> model_encoder_layer_9_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_9_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(110775040)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_9_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_9_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(110776640)))];
+            tensor<fp32, [384]> model_encoder_layer_9_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_9_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(111366528)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_9_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_9_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(111368128)))];
+            tensor<fp32, [384]> model_encoder_layer_9_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_9_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(111958016)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_9_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_9_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(111959616)))];
+            tensor<fp32, [384]> model_encoder_layer_9_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_9_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(112549504)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_9_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_9_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(112551104)))];
+            tensor<fp32, [384]> model_encoder_layer_9_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_9_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113140992)))];
+            tensor<fp32, [384]> model_encoder_layer_9_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_9_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113142592)))];
+            tensor<fp32, [1536]> model_encoder_layer_9_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_9_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113144192)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_9_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_9_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113150400)))];
+            tensor<fp32, [384]> model_encoder_layer_9_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_9_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115509760)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_9_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_9_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115511360)))];
+            tensor<fp32, [384]> model_encoder_layer_9_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_9_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117870720)))];
+            tensor<fp32, [384]> model_encoder_layer_9_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_9_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117872320)))];
+            tensor<fp32, [384]> model_encoder_layer_10_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_10_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117873920)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_10_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_10_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117875520)))];
+            tensor<fp32, [384]> model_encoder_layer_10_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_10_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(118465408)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_10_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_10_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(118467008)))];
+            tensor<fp32, [384]> model_encoder_layer_10_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_10_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119056896)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_10_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_10_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119058496)))];
+            tensor<fp32, [384]> model_encoder_layer_10_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_10_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119648384)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_10_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_10_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119649984)))];
+            tensor<fp32, [384]> model_encoder_layer_10_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_10_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120239872)))];
+            tensor<fp32, [384]> model_encoder_layer_10_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_10_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120241472)))];
+            tensor<fp32, [1536]> model_encoder_layer_10_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_10_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120243072)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_10_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_10_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120249280)))];
+            tensor<fp32, [384]> model_encoder_layer_10_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_10_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(122608640)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_10_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_10_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(122610240)))];
+            tensor<fp32, [384]> model_encoder_layer_10_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_10_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124969600)))];
+            tensor<fp32, [384]> model_encoder_layer_10_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_10_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124971200)))];
+            tensor<fp32, [384]> model_encoder_layer_11_attention_self_query_bias = const()[name = tensor<string, []>("model_encoder_layer_11_attention_self_query_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124972800)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_11_attention_self_query_weight = const()[name = tensor<string, []>("model_encoder_layer_11_attention_self_query_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124974400)))];
+            tensor<fp32, [384]> model_encoder_layer_11_attention_self_key_bias = const()[name = tensor<string, []>("model_encoder_layer_11_attention_self_key_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125564288)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_11_attention_self_key_weight = const()[name = tensor<string, []>("model_encoder_layer_11_attention_self_key_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125565888)))];
+            tensor<fp32, [384]> model_encoder_layer_11_attention_self_value_bias = const()[name = tensor<string, []>("model_encoder_layer_11_attention_self_value_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126155776)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_11_attention_self_value_weight = const()[name = tensor<string, []>("model_encoder_layer_11_attention_self_value_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126157376)))];
+            tensor<fp32, [384]> model_encoder_layer_11_attention_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_11_attention_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126747264)))];
+            tensor<fp32, [384, 384]> model_encoder_layer_11_attention_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_11_attention_output_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126748864)))];
+            tensor<fp32, [384]> model_encoder_layer_11_attention_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_11_attention_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(127338752)))];
+            tensor<fp32, [384]> model_encoder_layer_11_attention_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_11_attention_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(127340352)))];
+            tensor<fp32, [1536]> model_encoder_layer_11_intermediate_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_11_intermediate_dense_bias"), val = tensor<fp32, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(127341952)))];
+            tensor<fp32, [1536, 384]> model_encoder_layer_11_intermediate_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_11_intermediate_dense_weight"), val = tensor<fp32, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(127348160)))];
+            tensor<fp32, [384]> model_encoder_layer_11_output_dense_bias = const()[name = tensor<string, []>("model_encoder_layer_11_output_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129707520)))];
+            tensor<fp32, [384, 1536]> model_encoder_layer_11_output_dense_weight = const()[name = tensor<string, []>("model_encoder_layer_11_output_dense_weight"), val = tensor<fp32, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129709120)))];
+            tensor<fp32, [384]> model_encoder_layer_11_output_LayerNorm_bias = const()[name = tensor<string, []>("model_encoder_layer_11_output_LayerNorm_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132068480)))];
+            tensor<fp32, [384]> model_encoder_layer_11_output_LayerNorm_weight = const()[name = tensor<string, []>("model_encoder_layer_11_output_LayerNorm_weight"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132070080)))];
+            tensor<fp32, [384]> model_pooler_dense_bias = const()[name = tensor<string, []>("model_pooler_dense_bias"), val = tensor<fp32, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132071680)))];
+            tensor<fp32, [384, 384]> model_pooler_dense_weight = const()[name = tensor<string, []>("model_pooler_dense_weight"), val = tensor<fp32, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132073280)))];
+            tensor<int32, []> var_8 = const()[name = tensor<string, []>("op_8"), val = tensor<int32, []>(-1)];
+            tensor<fp32, []> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<fp32, []>(0x1.197998p-40)];
+            tensor<fp32, []> var_13 = const()[name = tensor<string, []>("op_13"), val = tensor<fp32, []>(0x1p+0)];
+            tensor<int32, [1]> var_34_axes_0 = const()[name = tensor<string, []>("op_34_axes_0"), val = tensor<int32, [1]>([1])];
+            tensor<int32, [1, 1, 128]> var_34 = expand_dims(axes = var_34_axes_0, x = attention_mask)[name = tensor<string, []>("op_34")];
+            tensor<int32, [1]> var_35_axes_0 = const()[name = tensor<string, []>("op_35_axes_0"), val = tensor<int32, [1]>([2])];
+            tensor<int32, [1, 1, 1, 128]> var_35 = expand_dims(axes = var_35_axes_0, x = var_34)[name = tensor<string, []>("op_35")];
+            tensor<string, []> var_37_dtype_0 = const()[name = tensor<string, []>("op_37_dtype_0"), val = tensor<string, []>("fp32")];
+            tensor<fp32, [1, 1, 1, 128]> cast_75 = cast(dtype = var_37_dtype_0, x = var_35)[name = tensor<string, []>("cast_75")];
+            tensor<fp32, [1, 1, 1, 128]> var_38 = sub(x = var_13, y = cast_75)[name = tensor<string, []>("op_38")];
+            tensor<fp32, []> var_39 = const()[name = tensor<string, []>("op_39"), val = tensor<fp32, []>(-0x1.fffffep+127)];
+            tensor<fp32, [1, 1, 1, 128]> attention_mask_1 = mul(x = var_38, y = var_39)[name = tensor<string, []>("attention_mask")];
+            tensor<int32, []> inputs_embeds_axis_0 = const()[name = tensor<string, []>("inputs_embeds_axis_0"), val = tensor<int32, []>(0)];
+            tensor<fp32, [1, 128, 384]> inputs_embeds = gather(axis = inputs_embeds_axis_0, indices = input_ids, x = model_embeddings_word_embeddings_weight)[name = tensor<string, []>("inputs_embeds")];
+            tensor<fp32, [1, 128, 384]> token_type_embeddings_1 = const()[name = tensor<string, []>("token_type_embeddings_1"), val = tensor<fp32, [1, 128, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132663168)))];
+            tensor<fp32, [1, 128, 384]> embeddings_1 = add(x = inputs_embeds, y = token_type_embeddings_1)[name = tensor<string, []>("embeddings_1")];
+            tensor<fp32, [1, 128, 384]> position_embeddings_1 = const()[name = tensor<string, []>("position_embeddings_1"), val = tensor<fp32, [1, 128, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132859840)))];
+            tensor<fp32, [1, 128, 384]> input_5 = add(x = embeddings_1, y = position_embeddings_1)[name = tensor<string, []>("input_5")];
+            tensor<int32, [1]> input_7_axes_0 = const()[name = tensor<string, []>("input_7_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_7 = layer_norm(axes = input_7_axes_0, beta = model_embeddings_LayerNorm_bias, epsilon = var_10, gamma = model_embeddings_LayerNorm_weight, x = input_5)[name = tensor<string, []>("input_7")];
+            tensor<fp32, [1, 128, 384]> linear_0 = linear(bias = model_encoder_layer_0_attention_self_query_bias, weight = model_encoder_layer_0_attention_self_query_weight, x = input_7)[name = tensor<string, []>("linear_0")];
+            tensor<fp32, [1, 128, 384]> linear_1 = linear(bias = model_encoder_layer_0_attention_self_key_bias, weight = model_encoder_layer_0_attention_self_key_weight, x = input_7)[name = tensor<string, []>("linear_1")];
+            tensor<int32, [4]> var_106 = const()[name = tensor<string, []>("op_106"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_3 = reshape(shape = var_106, x = linear_1)[name = tensor<string, []>("x_3")];
+            tensor<fp32, [1, 128, 384]> linear_2 = linear(bias = model_encoder_layer_0_attention_self_value_bias, weight = model_encoder_layer_0_attention_self_value_weight, x = input_7)[name = tensor<string, []>("linear_2")];
+            tensor<int32, [4]> var_115 = const()[name = tensor<string, []>("op_115"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_7 = reshape(shape = var_115, x = linear_2)[name = tensor<string, []>("x_7")];
+            tensor<int32, [4]> var_117 = const()[name = tensor<string, []>("op_117"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_121 = const()[name = tensor<string, []>("op_121"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_11 = reshape(shape = var_121, x = linear_0)[name = tensor<string, []>("x_11")];
+            tensor<bool, []> attention_scores_1_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_1_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_1_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_1_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_36_perm_0 = const()[name = tensor<string, []>("transpose_36_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_37_perm_0 = const()[name = tensor<string, []>("transpose_37_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_105 = transpose(perm = transpose_37_perm_0, x = x_3)[name = tensor<string, []>("transpose_105")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_106 = transpose(perm = transpose_36_perm_0, x = x_11)[name = tensor<string, []>("transpose_106")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_1 = matmul(transpose_x = attention_scores_1_transpose_x_0, transpose_y = attention_scores_1_transpose_y_0, x = transpose_106, y = transpose_105)[name = tensor<string, []>("attention_scores_1")];
+            tensor<fp32, []> _inversed_attention_scores_3_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_3_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_3 = mul(x = attention_scores_1, y = _inversed_attention_scores_3_y_0)[name = tensor<string, []>("_inversed_attention_scores_3")];
+            tensor<fp32, [1, 12, 128, 128]> input_11 = add(x = _inversed_attention_scores_3, y = attention_mask_1)[name = tensor<string, []>("input_11")];
+            tensor<fp32, [1, 12, 128, 128]> input_13 = softmax(axis = var_8, x = input_11)[name = tensor<string, []>("input_13")];
+            tensor<bool, []> context_layer_1_transpose_x_0 = const()[name = tensor<string, []>("context_layer_1_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_1_transpose_y_0 = const()[name = tensor<string, []>("context_layer_1_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_107 = transpose(perm = var_117, x = x_7)[name = tensor<string, []>("transpose_107")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_1 = matmul(transpose_x = context_layer_1_transpose_x_0, transpose_y = context_layer_1_transpose_y_0, x = input_13, y = transpose_107)[name = tensor<string, []>("context_layer_1")];
+            tensor<int32, [4]> var_133 = const()[name = tensor<string, []>("op_133"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_138 = const()[name = tensor<string, []>("op_138"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_104 = transpose(perm = var_133, x = context_layer_1)[name = tensor<string, []>("transpose_104")];
+            tensor<fp32, [1, 128, 384]> input_15 = reshape(shape = var_138, x = transpose_104)[name = tensor<string, []>("input_15")];
+            tensor<fp32, [1, 128, 384]> linear_3 = linear(bias = model_encoder_layer_0_attention_output_dense_bias, weight = model_encoder_layer_0_attention_output_dense_weight, x = input_15)[name = tensor<string, []>("linear_3")];
+            tensor<fp32, [1, 128, 384]> input_19 = add(x = linear_3, y = input_7)[name = tensor<string, []>("input_19")];
+            tensor<int32, [1]> input_21_axes_0 = const()[name = tensor<string, []>("input_21_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_21 = layer_norm(axes = input_21_axes_0, beta = model_encoder_layer_0_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_0_attention_output_LayerNorm_weight, x = input_19)[name = tensor<string, []>("input_21")];
+            tensor<fp32, [1, 128, 1536]> linear_4 = linear(bias = model_encoder_layer_0_intermediate_dense_bias, weight = model_encoder_layer_0_intermediate_dense_weight, x = input_21)[name = tensor<string, []>("linear_4")];
+            tensor<string, []> input_25_mode_0 = const()[name = tensor<string, []>("input_25_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_25 = gelu(mode = input_25_mode_0, x = linear_4)[name = tensor<string, []>("input_25")];
+            tensor<fp32, [1, 128, 384]> linear_5 = linear(bias = model_encoder_layer_0_output_dense_bias, weight = model_encoder_layer_0_output_dense_weight, x = input_25)[name = tensor<string, []>("linear_5")];
+            tensor<fp32, [1, 128, 384]> input_29 = add(x = linear_5, y = input_21)[name = tensor<string, []>("input_29")];
+            tensor<int32, [1]> input_31_axes_0 = const()[name = tensor<string, []>("input_31_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_31 = layer_norm(axes = input_31_axes_0, beta = model_encoder_layer_0_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_0_output_LayerNorm_weight, x = input_29)[name = tensor<string, []>("input_31")];
+            tensor<fp32, [1, 128, 384]> linear_6 = linear(bias = model_encoder_layer_1_attention_self_query_bias, weight = model_encoder_layer_1_attention_self_query_weight, x = input_31)[name = tensor<string, []>("linear_6")];
+            tensor<fp32, [1, 128, 384]> linear_7 = linear(bias = model_encoder_layer_1_attention_self_key_bias, weight = model_encoder_layer_1_attention_self_key_weight, x = input_31)[name = tensor<string, []>("linear_7")];
+            tensor<int32, [4]> var_183 = const()[name = tensor<string, []>("op_183"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_15 = reshape(shape = var_183, x = linear_7)[name = tensor<string, []>("x_15")];
+            tensor<fp32, [1, 128, 384]> linear_8 = linear(bias = model_encoder_layer_1_attention_self_value_bias, weight = model_encoder_layer_1_attention_self_value_weight, x = input_31)[name = tensor<string, []>("linear_8")];
+            tensor<int32, [4]> var_192 = const()[name = tensor<string, []>("op_192"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_19 = reshape(shape = var_192, x = linear_8)[name = tensor<string, []>("x_19")];
+            tensor<int32, [4]> var_194 = const()[name = tensor<string, []>("op_194"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_198 = const()[name = tensor<string, []>("op_198"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_23 = reshape(shape = var_198, x = linear_6)[name = tensor<string, []>("x_23")];
+            tensor<bool, []> attention_scores_5_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_5_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_5_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_5_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_38_perm_0 = const()[name = tensor<string, []>("transpose_38_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_39_perm_0 = const()[name = tensor<string, []>("transpose_39_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_101 = transpose(perm = transpose_39_perm_0, x = x_15)[name = tensor<string, []>("transpose_101")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_102 = transpose(perm = transpose_38_perm_0, x = x_23)[name = tensor<string, []>("transpose_102")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_5 = matmul(transpose_x = attention_scores_5_transpose_x_0, transpose_y = attention_scores_5_transpose_y_0, x = transpose_102, y = transpose_101)[name = tensor<string, []>("attention_scores_5")];
+            tensor<fp32, []> _inversed_attention_scores_7_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_7_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_7 = mul(x = attention_scores_5, y = _inversed_attention_scores_7_y_0)[name = tensor<string, []>("_inversed_attention_scores_7")];
+            tensor<fp32, [1, 12, 128, 128]> input_33 = add(x = _inversed_attention_scores_7, y = attention_mask_1)[name = tensor<string, []>("input_33")];
+            tensor<fp32, [1, 12, 128, 128]> input_35 = softmax(axis = var_8, x = input_33)[name = tensor<string, []>("input_35")];
+            tensor<bool, []> context_layer_5_transpose_x_0 = const()[name = tensor<string, []>("context_layer_5_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_5_transpose_y_0 = const()[name = tensor<string, []>("context_layer_5_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_103 = transpose(perm = var_194, x = x_19)[name = tensor<string, []>("transpose_103")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_5 = matmul(transpose_x = context_layer_5_transpose_x_0, transpose_y = context_layer_5_transpose_y_0, x = input_35, y = transpose_103)[name = tensor<string, []>("context_layer_5")];
+            tensor<int32, [4]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_215 = const()[name = tensor<string, []>("op_215"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_100 = transpose(perm = var_210, x = context_layer_5)[name = tensor<string, []>("transpose_100")];
+            tensor<fp32, [1, 128, 384]> input_37 = reshape(shape = var_215, x = transpose_100)[name = tensor<string, []>("input_37")];
+            tensor<fp32, [1, 128, 384]> linear_9 = linear(bias = model_encoder_layer_1_attention_output_dense_bias, weight = model_encoder_layer_1_attention_output_dense_weight, x = input_37)[name = tensor<string, []>("linear_9")];
+            tensor<fp32, [1, 128, 384]> input_41 = add(x = linear_9, y = input_31)[name = tensor<string, []>("input_41")];
+            tensor<int32, [1]> input_43_axes_0 = const()[name = tensor<string, []>("input_43_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_43 = layer_norm(axes = input_43_axes_0, beta = model_encoder_layer_1_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_1_attention_output_LayerNorm_weight, x = input_41)[name = tensor<string, []>("input_43")];
+            tensor<fp32, [1, 128, 1536]> linear_10 = linear(bias = model_encoder_layer_1_intermediate_dense_bias, weight = model_encoder_layer_1_intermediate_dense_weight, x = input_43)[name = tensor<string, []>("linear_10")];
+            tensor<string, []> input_47_mode_0 = const()[name = tensor<string, []>("input_47_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_47 = gelu(mode = input_47_mode_0, x = linear_10)[name = tensor<string, []>("input_47")];
+            tensor<fp32, [1, 128, 384]> linear_11 = linear(bias = model_encoder_layer_1_output_dense_bias, weight = model_encoder_layer_1_output_dense_weight, x = input_47)[name = tensor<string, []>("linear_11")];
+            tensor<fp32, [1, 128, 384]> input_51 = add(x = linear_11, y = input_43)[name = tensor<string, []>("input_51")];
+            tensor<int32, [1]> input_53_axes_0 = const()[name = tensor<string, []>("input_53_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_53 = layer_norm(axes = input_53_axes_0, beta = model_encoder_layer_1_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_1_output_LayerNorm_weight, x = input_51)[name = tensor<string, []>("input_53")];
+            tensor<fp32, [1, 128, 384]> linear_12 = linear(bias = model_encoder_layer_2_attention_self_query_bias, weight = model_encoder_layer_2_attention_self_query_weight, x = input_53)[name = tensor<string, []>("linear_12")];
+            tensor<fp32, [1, 128, 384]> linear_13 = linear(bias = model_encoder_layer_2_attention_self_key_bias, weight = model_encoder_layer_2_attention_self_key_weight, x = input_53)[name = tensor<string, []>("linear_13")];
+            tensor<int32, [4]> var_260 = const()[name = tensor<string, []>("op_260"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_27 = reshape(shape = var_260, x = linear_13)[name = tensor<string, []>("x_27")];
+            tensor<fp32, [1, 128, 384]> linear_14 = linear(bias = model_encoder_layer_2_attention_self_value_bias, weight = model_encoder_layer_2_attention_self_value_weight, x = input_53)[name = tensor<string, []>("linear_14")];
+            tensor<int32, [4]> var_269 = const()[name = tensor<string, []>("op_269"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_31 = reshape(shape = var_269, x = linear_14)[name = tensor<string, []>("x_31")];
+            tensor<int32, [4]> var_271 = const()[name = tensor<string, []>("op_271"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_275 = const()[name = tensor<string, []>("op_275"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_35 = reshape(shape = var_275, x = linear_12)[name = tensor<string, []>("x_35")];
+            tensor<bool, []> attention_scores_9_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_9_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_9_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_9_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_40_perm_0 = const()[name = tensor<string, []>("transpose_40_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_41_perm_0 = const()[name = tensor<string, []>("transpose_41_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_97 = transpose(perm = transpose_41_perm_0, x = x_27)[name = tensor<string, []>("transpose_97")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_98 = transpose(perm = transpose_40_perm_0, x = x_35)[name = tensor<string, []>("transpose_98")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_9 = matmul(transpose_x = attention_scores_9_transpose_x_0, transpose_y = attention_scores_9_transpose_y_0, x = transpose_98, y = transpose_97)[name = tensor<string, []>("attention_scores_9")];
+            tensor<fp32, []> _inversed_attention_scores_11_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_11_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_11 = mul(x = attention_scores_9, y = _inversed_attention_scores_11_y_0)[name = tensor<string, []>("_inversed_attention_scores_11")];
+            tensor<fp32, [1, 12, 128, 128]> input_55 = add(x = _inversed_attention_scores_11, y = attention_mask_1)[name = tensor<string, []>("input_55")];
+            tensor<fp32, [1, 12, 128, 128]> input_57 = softmax(axis = var_8, x = input_55)[name = tensor<string, []>("input_57")];
+            tensor<bool, []> context_layer_9_transpose_x_0 = const()[name = tensor<string, []>("context_layer_9_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_9_transpose_y_0 = const()[name = tensor<string, []>("context_layer_9_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_99 = transpose(perm = var_271, x = x_31)[name = tensor<string, []>("transpose_99")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_9 = matmul(transpose_x = context_layer_9_transpose_x_0, transpose_y = context_layer_9_transpose_y_0, x = input_57, y = transpose_99)[name = tensor<string, []>("context_layer_9")];
+            tensor<int32, [4]> var_287 = const()[name = tensor<string, []>("op_287"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_96 = transpose(perm = var_287, x = context_layer_9)[name = tensor<string, []>("transpose_96")];
+            tensor<fp32, [1, 128, 384]> input_59 = reshape(shape = var_292, x = transpose_96)[name = tensor<string, []>("input_59")];
+            tensor<fp32, [1, 128, 384]> linear_15 = linear(bias = model_encoder_layer_2_attention_output_dense_bias, weight = model_encoder_layer_2_attention_output_dense_weight, x = input_59)[name = tensor<string, []>("linear_15")];
+            tensor<fp32, [1, 128, 384]> input_63 = add(x = linear_15, y = input_53)[name = tensor<string, []>("input_63")];
+            tensor<int32, [1]> input_65_axes_0 = const()[name = tensor<string, []>("input_65_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_65 = layer_norm(axes = input_65_axes_0, beta = model_encoder_layer_2_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_2_attention_output_LayerNorm_weight, x = input_63)[name = tensor<string, []>("input_65")];
+            tensor<fp32, [1, 128, 1536]> linear_16 = linear(bias = model_encoder_layer_2_intermediate_dense_bias, weight = model_encoder_layer_2_intermediate_dense_weight, x = input_65)[name = tensor<string, []>("linear_16")];
+            tensor<string, []> input_69_mode_0 = const()[name = tensor<string, []>("input_69_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_69 = gelu(mode = input_69_mode_0, x = linear_16)[name = tensor<string, []>("input_69")];
+            tensor<fp32, [1, 128, 384]> linear_17 = linear(bias = model_encoder_layer_2_output_dense_bias, weight = model_encoder_layer_2_output_dense_weight, x = input_69)[name = tensor<string, []>("linear_17")];
+            tensor<fp32, [1, 128, 384]> input_73 = add(x = linear_17, y = input_65)[name = tensor<string, []>("input_73")];
+            tensor<int32, [1]> input_75_axes_0 = const()[name = tensor<string, []>("input_75_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_75 = layer_norm(axes = input_75_axes_0, beta = model_encoder_layer_2_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_2_output_LayerNorm_weight, x = input_73)[name = tensor<string, []>("input_75")];
+            tensor<fp32, [1, 128, 384]> linear_18 = linear(bias = model_encoder_layer_3_attention_self_query_bias, weight = model_encoder_layer_3_attention_self_query_weight, x = input_75)[name = tensor<string, []>("linear_18")];
+            tensor<fp32, [1, 128, 384]> linear_19 = linear(bias = model_encoder_layer_3_attention_self_key_bias, weight = model_encoder_layer_3_attention_self_key_weight, x = input_75)[name = tensor<string, []>("linear_19")];
+            tensor<int32, [4]> var_337 = const()[name = tensor<string, []>("op_337"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_39 = reshape(shape = var_337, x = linear_19)[name = tensor<string, []>("x_39")];
+            tensor<fp32, [1, 128, 384]> linear_20 = linear(bias = model_encoder_layer_3_attention_self_value_bias, weight = model_encoder_layer_3_attention_self_value_weight, x = input_75)[name = tensor<string, []>("linear_20")];
+            tensor<int32, [4]> var_346 = const()[name = tensor<string, []>("op_346"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_43 = reshape(shape = var_346, x = linear_20)[name = tensor<string, []>("x_43")];
+            tensor<int32, [4]> var_348 = const()[name = tensor<string, []>("op_348"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_352 = const()[name = tensor<string, []>("op_352"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_47 = reshape(shape = var_352, x = linear_18)[name = tensor<string, []>("x_47")];
+            tensor<bool, []> attention_scores_13_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_13_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_13_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_13_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_42_perm_0 = const()[name = tensor<string, []>("transpose_42_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_43_perm_0 = const()[name = tensor<string, []>("transpose_43_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_93 = transpose(perm = transpose_43_perm_0, x = x_39)[name = tensor<string, []>("transpose_93")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_94 = transpose(perm = transpose_42_perm_0, x = x_47)[name = tensor<string, []>("transpose_94")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_13 = matmul(transpose_x = attention_scores_13_transpose_x_0, transpose_y = attention_scores_13_transpose_y_0, x = transpose_94, y = transpose_93)[name = tensor<string, []>("attention_scores_13")];
+            tensor<fp32, []> _inversed_attention_scores_15_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_15_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_15 = mul(x = attention_scores_13, y = _inversed_attention_scores_15_y_0)[name = tensor<string, []>("_inversed_attention_scores_15")];
+            tensor<fp32, [1, 12, 128, 128]> input_77 = add(x = _inversed_attention_scores_15, y = attention_mask_1)[name = tensor<string, []>("input_77")];
+            tensor<fp32, [1, 12, 128, 128]> input_79 = softmax(axis = var_8, x = input_77)[name = tensor<string, []>("input_79")];
+            tensor<bool, []> context_layer_13_transpose_x_0 = const()[name = tensor<string, []>("context_layer_13_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_13_transpose_y_0 = const()[name = tensor<string, []>("context_layer_13_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_95 = transpose(perm = var_348, x = x_43)[name = tensor<string, []>("transpose_95")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_13 = matmul(transpose_x = context_layer_13_transpose_x_0, transpose_y = context_layer_13_transpose_y_0, x = input_79, y = transpose_95)[name = tensor<string, []>("context_layer_13")];
+            tensor<int32, [4]> var_364 = const()[name = tensor<string, []>("op_364"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_369 = const()[name = tensor<string, []>("op_369"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_92 = transpose(perm = var_364, x = context_layer_13)[name = tensor<string, []>("transpose_92")];
+            tensor<fp32, [1, 128, 384]> input_81 = reshape(shape = var_369, x = transpose_92)[name = tensor<string, []>("input_81")];
+            tensor<fp32, [1, 128, 384]> linear_21 = linear(bias = model_encoder_layer_3_attention_output_dense_bias, weight = model_encoder_layer_3_attention_output_dense_weight, x = input_81)[name = tensor<string, []>("linear_21")];
+            tensor<fp32, [1, 128, 384]> input_85 = add(x = linear_21, y = input_75)[name = tensor<string, []>("input_85")];
+            tensor<int32, [1]> input_87_axes_0 = const()[name = tensor<string, []>("input_87_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_87 = layer_norm(axes = input_87_axes_0, beta = model_encoder_layer_3_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_3_attention_output_LayerNorm_weight, x = input_85)[name = tensor<string, []>("input_87")];
+            tensor<fp32, [1, 128, 1536]> linear_22 = linear(bias = model_encoder_layer_3_intermediate_dense_bias, weight = model_encoder_layer_3_intermediate_dense_weight, x = input_87)[name = tensor<string, []>("linear_22")];
+            tensor<string, []> input_91_mode_0 = const()[name = tensor<string, []>("input_91_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_91 = gelu(mode = input_91_mode_0, x = linear_22)[name = tensor<string, []>("input_91")];
+            tensor<fp32, [1, 128, 384]> linear_23 = linear(bias = model_encoder_layer_3_output_dense_bias, weight = model_encoder_layer_3_output_dense_weight, x = input_91)[name = tensor<string, []>("linear_23")];
+            tensor<fp32, [1, 128, 384]> input_95 = add(x = linear_23, y = input_87)[name = tensor<string, []>("input_95")];
+            tensor<int32, [1]> input_97_axes_0 = const()[name = tensor<string, []>("input_97_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_97 = layer_norm(axes = input_97_axes_0, beta = model_encoder_layer_3_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_3_output_LayerNorm_weight, x = input_95)[name = tensor<string, []>("input_97")];
+            tensor<fp32, [1, 128, 384]> linear_24 = linear(bias = model_encoder_layer_4_attention_self_query_bias, weight = model_encoder_layer_4_attention_self_query_weight, x = input_97)[name = tensor<string, []>("linear_24")];
+            tensor<fp32, [1, 128, 384]> linear_25 = linear(bias = model_encoder_layer_4_attention_self_key_bias, weight = model_encoder_layer_4_attention_self_key_weight, x = input_97)[name = tensor<string, []>("linear_25")];
+            tensor<int32, [4]> var_414 = const()[name = tensor<string, []>("op_414"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_51 = reshape(shape = var_414, x = linear_25)[name = tensor<string, []>("x_51")];
+            tensor<fp32, [1, 128, 384]> linear_26 = linear(bias = model_encoder_layer_4_attention_self_value_bias, weight = model_encoder_layer_4_attention_self_value_weight, x = input_97)[name = tensor<string, []>("linear_26")];
+            tensor<int32, [4]> var_423 = const()[name = tensor<string, []>("op_423"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_55 = reshape(shape = var_423, x = linear_26)[name = tensor<string, []>("x_55")];
+            tensor<int32, [4]> var_425 = const()[name = tensor<string, []>("op_425"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_429 = const()[name = tensor<string, []>("op_429"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_59 = reshape(shape = var_429, x = linear_24)[name = tensor<string, []>("x_59")];
+            tensor<bool, []> attention_scores_17_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_17_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_17_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_17_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_44_perm_0 = const()[name = tensor<string, []>("transpose_44_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_45_perm_0 = const()[name = tensor<string, []>("transpose_45_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_89 = transpose(perm = transpose_45_perm_0, x = x_51)[name = tensor<string, []>("transpose_89")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_90 = transpose(perm = transpose_44_perm_0, x = x_59)[name = tensor<string, []>("transpose_90")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_17 = matmul(transpose_x = attention_scores_17_transpose_x_0, transpose_y = attention_scores_17_transpose_y_0, x = transpose_90, y = transpose_89)[name = tensor<string, []>("attention_scores_17")];
+            tensor<fp32, []> _inversed_attention_scores_19_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_19_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_19 = mul(x = attention_scores_17, y = _inversed_attention_scores_19_y_0)[name = tensor<string, []>("_inversed_attention_scores_19")];
+            tensor<fp32, [1, 12, 128, 128]> input_99 = add(x = _inversed_attention_scores_19, y = attention_mask_1)[name = tensor<string, []>("input_99")];
+            tensor<fp32, [1, 12, 128, 128]> input_101 = softmax(axis = var_8, x = input_99)[name = tensor<string, []>("input_101")];
+            tensor<bool, []> context_layer_17_transpose_x_0 = const()[name = tensor<string, []>("context_layer_17_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_17_transpose_y_0 = const()[name = tensor<string, []>("context_layer_17_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_91 = transpose(perm = var_425, x = x_55)[name = tensor<string, []>("transpose_91")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_17 = matmul(transpose_x = context_layer_17_transpose_x_0, transpose_y = context_layer_17_transpose_y_0, x = input_101, y = transpose_91)[name = tensor<string, []>("context_layer_17")];
+            tensor<int32, [4]> var_441 = const()[name = tensor<string, []>("op_441"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_446 = const()[name = tensor<string, []>("op_446"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_88 = transpose(perm = var_441, x = context_layer_17)[name = tensor<string, []>("transpose_88")];
+            tensor<fp32, [1, 128, 384]> input_103 = reshape(shape = var_446, x = transpose_88)[name = tensor<string, []>("input_103")];
+            tensor<fp32, [1, 128, 384]> linear_27 = linear(bias = model_encoder_layer_4_attention_output_dense_bias, weight = model_encoder_layer_4_attention_output_dense_weight, x = input_103)[name = tensor<string, []>("linear_27")];
+            tensor<fp32, [1, 128, 384]> input_107 = add(x = linear_27, y = input_97)[name = tensor<string, []>("input_107")];
+            tensor<int32, [1]> input_109_axes_0 = const()[name = tensor<string, []>("input_109_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_109 = layer_norm(axes = input_109_axes_0, beta = model_encoder_layer_4_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_4_attention_output_LayerNorm_weight, x = input_107)[name = tensor<string, []>("input_109")];
+            tensor<fp32, [1, 128, 1536]> linear_28 = linear(bias = model_encoder_layer_4_intermediate_dense_bias, weight = model_encoder_layer_4_intermediate_dense_weight, x = input_109)[name = tensor<string, []>("linear_28")];
+            tensor<string, []> input_113_mode_0 = const()[name = tensor<string, []>("input_113_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_113 = gelu(mode = input_113_mode_0, x = linear_28)[name = tensor<string, []>("input_113")];
+            tensor<fp32, [1, 128, 384]> linear_29 = linear(bias = model_encoder_layer_4_output_dense_bias, weight = model_encoder_layer_4_output_dense_weight, x = input_113)[name = tensor<string, []>("linear_29")];
+            tensor<fp32, [1, 128, 384]> input_117 = add(x = linear_29, y = input_109)[name = tensor<string, []>("input_117")];
+            tensor<int32, [1]> input_119_axes_0 = const()[name = tensor<string, []>("input_119_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_119 = layer_norm(axes = input_119_axes_0, beta = model_encoder_layer_4_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_4_output_LayerNorm_weight, x = input_117)[name = tensor<string, []>("input_119")];
+            tensor<fp32, [1, 128, 384]> linear_30 = linear(bias = model_encoder_layer_5_attention_self_query_bias, weight = model_encoder_layer_5_attention_self_query_weight, x = input_119)[name = tensor<string, []>("linear_30")];
+            tensor<fp32, [1, 128, 384]> linear_31 = linear(bias = model_encoder_layer_5_attention_self_key_bias, weight = model_encoder_layer_5_attention_self_key_weight, x = input_119)[name = tensor<string, []>("linear_31")];
+            tensor<int32, [4]> var_491 = const()[name = tensor<string, []>("op_491"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_63 = reshape(shape = var_491, x = linear_31)[name = tensor<string, []>("x_63")];
+            tensor<fp32, [1, 128, 384]> linear_32 = linear(bias = model_encoder_layer_5_attention_self_value_bias, weight = model_encoder_layer_5_attention_self_value_weight, x = input_119)[name = tensor<string, []>("linear_32")];
+            tensor<int32, [4]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_67 = reshape(shape = var_500, x = linear_32)[name = tensor<string, []>("x_67")];
+            tensor<int32, [4]> var_502 = const()[name = tensor<string, []>("op_502"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_71 = reshape(shape = var_506, x = linear_30)[name = tensor<string, []>("x_71")];
+            tensor<bool, []> attention_scores_21_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_21_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_21_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_21_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_46_perm_0 = const()[name = tensor<string, []>("transpose_46_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_47_perm_0 = const()[name = tensor<string, []>("transpose_47_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_85 = transpose(perm = transpose_47_perm_0, x = x_63)[name = tensor<string, []>("transpose_85")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_86 = transpose(perm = transpose_46_perm_0, x = x_71)[name = tensor<string, []>("transpose_86")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_21 = matmul(transpose_x = attention_scores_21_transpose_x_0, transpose_y = attention_scores_21_transpose_y_0, x = transpose_86, y = transpose_85)[name = tensor<string, []>("attention_scores_21")];
+            tensor<fp32, []> _inversed_attention_scores_23_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_23_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_23 = mul(x = attention_scores_21, y = _inversed_attention_scores_23_y_0)[name = tensor<string, []>("_inversed_attention_scores_23")];
+            tensor<fp32, [1, 12, 128, 128]> input_121 = add(x = _inversed_attention_scores_23, y = attention_mask_1)[name = tensor<string, []>("input_121")];
+            tensor<fp32, [1, 12, 128, 128]> input_123 = softmax(axis = var_8, x = input_121)[name = tensor<string, []>("input_123")];
+            tensor<bool, []> context_layer_21_transpose_x_0 = const()[name = tensor<string, []>("context_layer_21_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_21_transpose_y_0 = const()[name = tensor<string, []>("context_layer_21_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_87 = transpose(perm = var_502, x = x_67)[name = tensor<string, []>("transpose_87")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_21 = matmul(transpose_x = context_layer_21_transpose_x_0, transpose_y = context_layer_21_transpose_y_0, x = input_123, y = transpose_87)[name = tensor<string, []>("context_layer_21")];
+            tensor<int32, [4]> var_518 = const()[name = tensor<string, []>("op_518"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_523 = const()[name = tensor<string, []>("op_523"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_84 = transpose(perm = var_518, x = context_layer_21)[name = tensor<string, []>("transpose_84")];
+            tensor<fp32, [1, 128, 384]> input_125 = reshape(shape = var_523, x = transpose_84)[name = tensor<string, []>("input_125")];
+            tensor<fp32, [1, 128, 384]> linear_33 = linear(bias = model_encoder_layer_5_attention_output_dense_bias, weight = model_encoder_layer_5_attention_output_dense_weight, x = input_125)[name = tensor<string, []>("linear_33")];
+            tensor<fp32, [1, 128, 384]> input_129 = add(x = linear_33, y = input_119)[name = tensor<string, []>("input_129")];
+            tensor<int32, [1]> input_131_axes_0 = const()[name = tensor<string, []>("input_131_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_131 = layer_norm(axes = input_131_axes_0, beta = model_encoder_layer_5_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_5_attention_output_LayerNorm_weight, x = input_129)[name = tensor<string, []>("input_131")];
+            tensor<fp32, [1, 128, 1536]> linear_34 = linear(bias = model_encoder_layer_5_intermediate_dense_bias, weight = model_encoder_layer_5_intermediate_dense_weight, x = input_131)[name = tensor<string, []>("linear_34")];
+            tensor<string, []> input_135_mode_0 = const()[name = tensor<string, []>("input_135_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_135 = gelu(mode = input_135_mode_0, x = linear_34)[name = tensor<string, []>("input_135")];
+            tensor<fp32, [1, 128, 384]> linear_35 = linear(bias = model_encoder_layer_5_output_dense_bias, weight = model_encoder_layer_5_output_dense_weight, x = input_135)[name = tensor<string, []>("linear_35")];
+            tensor<fp32, [1, 128, 384]> input_139 = add(x = linear_35, y = input_131)[name = tensor<string, []>("input_139")];
+            tensor<int32, [1]> input_141_axes_0 = const()[name = tensor<string, []>("input_141_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_141 = layer_norm(axes = input_141_axes_0, beta = model_encoder_layer_5_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_5_output_LayerNorm_weight, x = input_139)[name = tensor<string, []>("input_141")];
+            tensor<fp32, [1, 128, 384]> linear_36 = linear(bias = model_encoder_layer_6_attention_self_query_bias, weight = model_encoder_layer_6_attention_self_query_weight, x = input_141)[name = tensor<string, []>("linear_36")];
+            tensor<fp32, [1, 128, 384]> linear_37 = linear(bias = model_encoder_layer_6_attention_self_key_bias, weight = model_encoder_layer_6_attention_self_key_weight, x = input_141)[name = tensor<string, []>("linear_37")];
+            tensor<int32, [4]> var_568 = const()[name = tensor<string, []>("op_568"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_75 = reshape(shape = var_568, x = linear_37)[name = tensor<string, []>("x_75")];
+            tensor<fp32, [1, 128, 384]> linear_38 = linear(bias = model_encoder_layer_6_attention_self_value_bias, weight = model_encoder_layer_6_attention_self_value_weight, x = input_141)[name = tensor<string, []>("linear_38")];
+            tensor<int32, [4]> var_577 = const()[name = tensor<string, []>("op_577"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_79 = reshape(shape = var_577, x = linear_38)[name = tensor<string, []>("x_79")];
+            tensor<int32, [4]> var_579 = const()[name = tensor<string, []>("op_579"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_583 = const()[name = tensor<string, []>("op_583"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_83 = reshape(shape = var_583, x = linear_36)[name = tensor<string, []>("x_83")];
+            tensor<bool, []> attention_scores_25_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_25_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_25_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_25_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_48_perm_0 = const()[name = tensor<string, []>("transpose_48_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_49_perm_0 = const()[name = tensor<string, []>("transpose_49_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_81 = transpose(perm = transpose_49_perm_0, x = x_75)[name = tensor<string, []>("transpose_81")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_82 = transpose(perm = transpose_48_perm_0, x = x_83)[name = tensor<string, []>("transpose_82")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_25 = matmul(transpose_x = attention_scores_25_transpose_x_0, transpose_y = attention_scores_25_transpose_y_0, x = transpose_82, y = transpose_81)[name = tensor<string, []>("attention_scores_25")];
+            tensor<fp32, []> _inversed_attention_scores_27_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_27_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_27 = mul(x = attention_scores_25, y = _inversed_attention_scores_27_y_0)[name = tensor<string, []>("_inversed_attention_scores_27")];
+            tensor<fp32, [1, 12, 128, 128]> input_143 = add(x = _inversed_attention_scores_27, y = attention_mask_1)[name = tensor<string, []>("input_143")];
+            tensor<fp32, [1, 12, 128, 128]> input_145 = softmax(axis = var_8, x = input_143)[name = tensor<string, []>("input_145")];
+            tensor<bool, []> context_layer_25_transpose_x_0 = const()[name = tensor<string, []>("context_layer_25_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_25_transpose_y_0 = const()[name = tensor<string, []>("context_layer_25_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_83 = transpose(perm = var_579, x = x_79)[name = tensor<string, []>("transpose_83")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_25 = matmul(transpose_x = context_layer_25_transpose_x_0, transpose_y = context_layer_25_transpose_y_0, x = input_145, y = transpose_83)[name = tensor<string, []>("context_layer_25")];
+            tensor<int32, [4]> var_595 = const()[name = tensor<string, []>("op_595"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_80 = transpose(perm = var_595, x = context_layer_25)[name = tensor<string, []>("transpose_80")];
+            tensor<fp32, [1, 128, 384]> input_147 = reshape(shape = var_600, x = transpose_80)[name = tensor<string, []>("input_147")];
+            tensor<fp32, [1, 128, 384]> linear_39 = linear(bias = model_encoder_layer_6_attention_output_dense_bias, weight = model_encoder_layer_6_attention_output_dense_weight, x = input_147)[name = tensor<string, []>("linear_39")];
+            tensor<fp32, [1, 128, 384]> input_151 = add(x = linear_39, y = input_141)[name = tensor<string, []>("input_151")];
+            tensor<int32, [1]> input_153_axes_0 = const()[name = tensor<string, []>("input_153_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_153 = layer_norm(axes = input_153_axes_0, beta = model_encoder_layer_6_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_6_attention_output_LayerNorm_weight, x = input_151)[name = tensor<string, []>("input_153")];
+            tensor<fp32, [1, 128, 1536]> linear_40 = linear(bias = model_encoder_layer_6_intermediate_dense_bias, weight = model_encoder_layer_6_intermediate_dense_weight, x = input_153)[name = tensor<string, []>("linear_40")];
+            tensor<string, []> input_157_mode_0 = const()[name = tensor<string, []>("input_157_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_157 = gelu(mode = input_157_mode_0, x = linear_40)[name = tensor<string, []>("input_157")];
+            tensor<fp32, [1, 128, 384]> linear_41 = linear(bias = model_encoder_layer_6_output_dense_bias, weight = model_encoder_layer_6_output_dense_weight, x = input_157)[name = tensor<string, []>("linear_41")];
+            tensor<fp32, [1, 128, 384]> input_161 = add(x = linear_41, y = input_153)[name = tensor<string, []>("input_161")];
+            tensor<int32, [1]> input_163_axes_0 = const()[name = tensor<string, []>("input_163_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_163 = layer_norm(axes = input_163_axes_0, beta = model_encoder_layer_6_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_6_output_LayerNorm_weight, x = input_161)[name = tensor<string, []>("input_163")];
+            tensor<fp32, [1, 128, 384]> linear_42 = linear(bias = model_encoder_layer_7_attention_self_query_bias, weight = model_encoder_layer_7_attention_self_query_weight, x = input_163)[name = tensor<string, []>("linear_42")];
+            tensor<fp32, [1, 128, 384]> linear_43 = linear(bias = model_encoder_layer_7_attention_self_key_bias, weight = model_encoder_layer_7_attention_self_key_weight, x = input_163)[name = tensor<string, []>("linear_43")];
+            tensor<int32, [4]> var_645 = const()[name = tensor<string, []>("op_645"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_87 = reshape(shape = var_645, x = linear_43)[name = tensor<string, []>("x_87")];
+            tensor<fp32, [1, 128, 384]> linear_44 = linear(bias = model_encoder_layer_7_attention_self_value_bias, weight = model_encoder_layer_7_attention_self_value_weight, x = input_163)[name = tensor<string, []>("linear_44")];
+            tensor<int32, [4]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_91 = reshape(shape = var_654, x = linear_44)[name = tensor<string, []>("x_91")];
+            tensor<int32, [4]> var_656 = const()[name = tensor<string, []>("op_656"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_660 = const()[name = tensor<string, []>("op_660"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_95 = reshape(shape = var_660, x = linear_42)[name = tensor<string, []>("x_95")];
+            tensor<bool, []> attention_scores_29_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_29_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_29_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_29_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_50_perm_0 = const()[name = tensor<string, []>("transpose_50_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_51_perm_0 = const()[name = tensor<string, []>("transpose_51_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_77 = transpose(perm = transpose_51_perm_0, x = x_87)[name = tensor<string, []>("transpose_77")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_78 = transpose(perm = transpose_50_perm_0, x = x_95)[name = tensor<string, []>("transpose_78")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_29 = matmul(transpose_x = attention_scores_29_transpose_x_0, transpose_y = attention_scores_29_transpose_y_0, x = transpose_78, y = transpose_77)[name = tensor<string, []>("attention_scores_29")];
+            tensor<fp32, []> _inversed_attention_scores_31_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_31_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_31 = mul(x = attention_scores_29, y = _inversed_attention_scores_31_y_0)[name = tensor<string, []>("_inversed_attention_scores_31")];
+            tensor<fp32, [1, 12, 128, 128]> input_165 = add(x = _inversed_attention_scores_31, y = attention_mask_1)[name = tensor<string, []>("input_165")];
+            tensor<fp32, [1, 12, 128, 128]> input_167 = softmax(axis = var_8, x = input_165)[name = tensor<string, []>("input_167")];
+            tensor<bool, []> context_layer_29_transpose_x_0 = const()[name = tensor<string, []>("context_layer_29_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_29_transpose_y_0 = const()[name = tensor<string, []>("context_layer_29_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_79 = transpose(perm = var_656, x = x_91)[name = tensor<string, []>("transpose_79")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_29 = matmul(transpose_x = context_layer_29_transpose_x_0, transpose_y = context_layer_29_transpose_y_0, x = input_167, y = transpose_79)[name = tensor<string, []>("context_layer_29")];
+            tensor<int32, [4]> var_672 = const()[name = tensor<string, []>("op_672"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_677 = const()[name = tensor<string, []>("op_677"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_76 = transpose(perm = var_672, x = context_layer_29)[name = tensor<string, []>("transpose_76")];
+            tensor<fp32, [1, 128, 384]> input_169 = reshape(shape = var_677, x = transpose_76)[name = tensor<string, []>("input_169")];
+            tensor<fp32, [1, 128, 384]> linear_45 = linear(bias = model_encoder_layer_7_attention_output_dense_bias, weight = model_encoder_layer_7_attention_output_dense_weight, x = input_169)[name = tensor<string, []>("linear_45")];
+            tensor<fp32, [1, 128, 384]> input_173 = add(x = linear_45, y = input_163)[name = tensor<string, []>("input_173")];
+            tensor<int32, [1]> input_175_axes_0 = const()[name = tensor<string, []>("input_175_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_175 = layer_norm(axes = input_175_axes_0, beta = model_encoder_layer_7_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_7_attention_output_LayerNorm_weight, x = input_173)[name = tensor<string, []>("input_175")];
+            tensor<fp32, [1, 128, 1536]> linear_46 = linear(bias = model_encoder_layer_7_intermediate_dense_bias, weight = model_encoder_layer_7_intermediate_dense_weight, x = input_175)[name = tensor<string, []>("linear_46")];
+            tensor<string, []> input_179_mode_0 = const()[name = tensor<string, []>("input_179_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_179 = gelu(mode = input_179_mode_0, x = linear_46)[name = tensor<string, []>("input_179")];
+            tensor<fp32, [1, 128, 384]> linear_47 = linear(bias = model_encoder_layer_7_output_dense_bias, weight = model_encoder_layer_7_output_dense_weight, x = input_179)[name = tensor<string, []>("linear_47")];
+            tensor<fp32, [1, 128, 384]> input_183 = add(x = linear_47, y = input_175)[name = tensor<string, []>("input_183")];
+            tensor<int32, [1]> input_185_axes_0 = const()[name = tensor<string, []>("input_185_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_185 = layer_norm(axes = input_185_axes_0, beta = model_encoder_layer_7_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_7_output_LayerNorm_weight, x = input_183)[name = tensor<string, []>("input_185")];
+            tensor<fp32, [1, 128, 384]> linear_48 = linear(bias = model_encoder_layer_8_attention_self_query_bias, weight = model_encoder_layer_8_attention_self_query_weight, x = input_185)[name = tensor<string, []>("linear_48")];
+            tensor<fp32, [1, 128, 384]> linear_49 = linear(bias = model_encoder_layer_8_attention_self_key_bias, weight = model_encoder_layer_8_attention_self_key_weight, x = input_185)[name = tensor<string, []>("linear_49")];
+            tensor<int32, [4]> var_722 = const()[name = tensor<string, []>("op_722"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_99 = reshape(shape = var_722, x = linear_49)[name = tensor<string, []>("x_99")];
+            tensor<fp32, [1, 128, 384]> linear_50 = linear(bias = model_encoder_layer_8_attention_self_value_bias, weight = model_encoder_layer_8_attention_self_value_weight, x = input_185)[name = tensor<string, []>("linear_50")];
+            tensor<int32, [4]> var_731 = const()[name = tensor<string, []>("op_731"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_103 = reshape(shape = var_731, x = linear_50)[name = tensor<string, []>("x_103")];
+            tensor<int32, [4]> var_733 = const()[name = tensor<string, []>("op_733"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_737 = const()[name = tensor<string, []>("op_737"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_107 = reshape(shape = var_737, x = linear_48)[name = tensor<string, []>("x_107")];
+            tensor<bool, []> attention_scores_33_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_33_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_33_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_33_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_52_perm_0 = const()[name = tensor<string, []>("transpose_52_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_53_perm_0 = const()[name = tensor<string, []>("transpose_53_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_73 = transpose(perm = transpose_53_perm_0, x = x_99)[name = tensor<string, []>("transpose_73")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_74 = transpose(perm = transpose_52_perm_0, x = x_107)[name = tensor<string, []>("transpose_74")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_33 = matmul(transpose_x = attention_scores_33_transpose_x_0, transpose_y = attention_scores_33_transpose_y_0, x = transpose_74, y = transpose_73)[name = tensor<string, []>("attention_scores_33")];
+            tensor<fp32, []> _inversed_attention_scores_35_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_35_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_35 = mul(x = attention_scores_33, y = _inversed_attention_scores_35_y_0)[name = tensor<string, []>("_inversed_attention_scores_35")];
+            tensor<fp32, [1, 12, 128, 128]> input_187 = add(x = _inversed_attention_scores_35, y = attention_mask_1)[name = tensor<string, []>("input_187")];
+            tensor<fp32, [1, 12, 128, 128]> input_189 = softmax(axis = var_8, x = input_187)[name = tensor<string, []>("input_189")];
+            tensor<bool, []> context_layer_33_transpose_x_0 = const()[name = tensor<string, []>("context_layer_33_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_33_transpose_y_0 = const()[name = tensor<string, []>("context_layer_33_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_75 = transpose(perm = var_733, x = x_103)[name = tensor<string, []>("transpose_75")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_33 = matmul(transpose_x = context_layer_33_transpose_x_0, transpose_y = context_layer_33_transpose_y_0, x = input_189, y = transpose_75)[name = tensor<string, []>("context_layer_33")];
+            tensor<int32, [4]> var_749 = const()[name = tensor<string, []>("op_749"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_754 = const()[name = tensor<string, []>("op_754"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_72 = transpose(perm = var_749, x = context_layer_33)[name = tensor<string, []>("transpose_72")];
+            tensor<fp32, [1, 128, 384]> input_191 = reshape(shape = var_754, x = transpose_72)[name = tensor<string, []>("input_191")];
+            tensor<fp32, [1, 128, 384]> linear_51 = linear(bias = model_encoder_layer_8_attention_output_dense_bias, weight = model_encoder_layer_8_attention_output_dense_weight, x = input_191)[name = tensor<string, []>("linear_51")];
+            tensor<fp32, [1, 128, 384]> input_195 = add(x = linear_51, y = input_185)[name = tensor<string, []>("input_195")];
+            tensor<int32, [1]> input_197_axes_0 = const()[name = tensor<string, []>("input_197_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_197 = layer_norm(axes = input_197_axes_0, beta = model_encoder_layer_8_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_8_attention_output_LayerNorm_weight, x = input_195)[name = tensor<string, []>("input_197")];
+            tensor<fp32, [1, 128, 1536]> linear_52 = linear(bias = model_encoder_layer_8_intermediate_dense_bias, weight = model_encoder_layer_8_intermediate_dense_weight, x = input_197)[name = tensor<string, []>("linear_52")];
+            tensor<string, []> input_201_mode_0 = const()[name = tensor<string, []>("input_201_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_201 = gelu(mode = input_201_mode_0, x = linear_52)[name = tensor<string, []>("input_201")];
+            tensor<fp32, [1, 128, 384]> linear_53 = linear(bias = model_encoder_layer_8_output_dense_bias, weight = model_encoder_layer_8_output_dense_weight, x = input_201)[name = tensor<string, []>("linear_53")];
+            tensor<fp32, [1, 128, 384]> input_205 = add(x = linear_53, y = input_197)[name = tensor<string, []>("input_205")];
+            tensor<int32, [1]> input_207_axes_0 = const()[name = tensor<string, []>("input_207_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_207 = layer_norm(axes = input_207_axes_0, beta = model_encoder_layer_8_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_8_output_LayerNorm_weight, x = input_205)[name = tensor<string, []>("input_207")];
+            tensor<fp32, [1, 128, 384]> linear_54 = linear(bias = model_encoder_layer_9_attention_self_query_bias, weight = model_encoder_layer_9_attention_self_query_weight, x = input_207)[name = tensor<string, []>("linear_54")];
+            tensor<fp32, [1, 128, 384]> linear_55 = linear(bias = model_encoder_layer_9_attention_self_key_bias, weight = model_encoder_layer_9_attention_self_key_weight, x = input_207)[name = tensor<string, []>("linear_55")];
+            tensor<int32, [4]> var_799 = const()[name = tensor<string, []>("op_799"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_111 = reshape(shape = var_799, x = linear_55)[name = tensor<string, []>("x_111")];
+            tensor<fp32, [1, 128, 384]> linear_56 = linear(bias = model_encoder_layer_9_attention_self_value_bias, weight = model_encoder_layer_9_attention_self_value_weight, x = input_207)[name = tensor<string, []>("linear_56")];
+            tensor<int32, [4]> var_808 = const()[name = tensor<string, []>("op_808"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_115 = reshape(shape = var_808, x = linear_56)[name = tensor<string, []>("x_115")];
+            tensor<int32, [4]> var_810 = const()[name = tensor<string, []>("op_810"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_814 = const()[name = tensor<string, []>("op_814"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_119 = reshape(shape = var_814, x = linear_54)[name = tensor<string, []>("x_119")];
+            tensor<bool, []> attention_scores_37_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_37_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_37_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_37_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_54_perm_0 = const()[name = tensor<string, []>("transpose_54_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_55_perm_0 = const()[name = tensor<string, []>("transpose_55_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_69 = transpose(perm = transpose_55_perm_0, x = x_111)[name = tensor<string, []>("transpose_69")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_70 = transpose(perm = transpose_54_perm_0, x = x_119)[name = tensor<string, []>("transpose_70")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_37 = matmul(transpose_x = attention_scores_37_transpose_x_0, transpose_y = attention_scores_37_transpose_y_0, x = transpose_70, y = transpose_69)[name = tensor<string, []>("attention_scores_37")];
+            tensor<fp32, []> _inversed_attention_scores_39_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_39_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_39 = mul(x = attention_scores_37, y = _inversed_attention_scores_39_y_0)[name = tensor<string, []>("_inversed_attention_scores_39")];
+            tensor<fp32, [1, 12, 128, 128]> input_209 = add(x = _inversed_attention_scores_39, y = attention_mask_1)[name = tensor<string, []>("input_209")];
+            tensor<fp32, [1, 12, 128, 128]> input_211 = softmax(axis = var_8, x = input_209)[name = tensor<string, []>("input_211")];
+            tensor<bool, []> context_layer_37_transpose_x_0 = const()[name = tensor<string, []>("context_layer_37_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_37_transpose_y_0 = const()[name = tensor<string, []>("context_layer_37_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_71 = transpose(perm = var_810, x = x_115)[name = tensor<string, []>("transpose_71")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_37 = matmul(transpose_x = context_layer_37_transpose_x_0, transpose_y = context_layer_37_transpose_y_0, x = input_211, y = transpose_71)[name = tensor<string, []>("context_layer_37")];
+            tensor<int32, [4]> var_826 = const()[name = tensor<string, []>("op_826"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_831 = const()[name = tensor<string, []>("op_831"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_68 = transpose(perm = var_826, x = context_layer_37)[name = tensor<string, []>("transpose_68")];
+            tensor<fp32, [1, 128, 384]> input_213 = reshape(shape = var_831, x = transpose_68)[name = tensor<string, []>("input_213")];
+            tensor<fp32, [1, 128, 384]> linear_57 = linear(bias = model_encoder_layer_9_attention_output_dense_bias, weight = model_encoder_layer_9_attention_output_dense_weight, x = input_213)[name = tensor<string, []>("linear_57")];
+            tensor<fp32, [1, 128, 384]> input_217 = add(x = linear_57, y = input_207)[name = tensor<string, []>("input_217")];
+            tensor<int32, [1]> input_219_axes_0 = const()[name = tensor<string, []>("input_219_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_219 = layer_norm(axes = input_219_axes_0, beta = model_encoder_layer_9_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_9_attention_output_LayerNorm_weight, x = input_217)[name = tensor<string, []>("input_219")];
+            tensor<fp32, [1, 128, 1536]> linear_58 = linear(bias = model_encoder_layer_9_intermediate_dense_bias, weight = model_encoder_layer_9_intermediate_dense_weight, x = input_219)[name = tensor<string, []>("linear_58")];
+            tensor<string, []> input_223_mode_0 = const()[name = tensor<string, []>("input_223_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_223 = gelu(mode = input_223_mode_0, x = linear_58)[name = tensor<string, []>("input_223")];
+            tensor<fp32, [1, 128, 384]> linear_59 = linear(bias = model_encoder_layer_9_output_dense_bias, weight = model_encoder_layer_9_output_dense_weight, x = input_223)[name = tensor<string, []>("linear_59")];
+            tensor<fp32, [1, 128, 384]> input_227 = add(x = linear_59, y = input_219)[name = tensor<string, []>("input_227")];
+            tensor<int32, [1]> input_229_axes_0 = const()[name = tensor<string, []>("input_229_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_229 = layer_norm(axes = input_229_axes_0, beta = model_encoder_layer_9_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_9_output_LayerNorm_weight, x = input_227)[name = tensor<string, []>("input_229")];
+            tensor<fp32, [1, 128, 384]> linear_60 = linear(bias = model_encoder_layer_10_attention_self_query_bias, weight = model_encoder_layer_10_attention_self_query_weight, x = input_229)[name = tensor<string, []>("linear_60")];
+            tensor<fp32, [1, 128, 384]> linear_61 = linear(bias = model_encoder_layer_10_attention_self_key_bias, weight = model_encoder_layer_10_attention_self_key_weight, x = input_229)[name = tensor<string, []>("linear_61")];
+            tensor<int32, [4]> var_876 = const()[name = tensor<string, []>("op_876"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_123 = reshape(shape = var_876, x = linear_61)[name = tensor<string, []>("x_123")];
+            tensor<fp32, [1, 128, 384]> linear_62 = linear(bias = model_encoder_layer_10_attention_self_value_bias, weight = model_encoder_layer_10_attention_self_value_weight, x = input_229)[name = tensor<string, []>("linear_62")];
+            tensor<int32, [4]> var_885 = const()[name = tensor<string, []>("op_885"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_127 = reshape(shape = var_885, x = linear_62)[name = tensor<string, []>("x_127")];
+            tensor<int32, [4]> var_887 = const()[name = tensor<string, []>("op_887"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_891 = const()[name = tensor<string, []>("op_891"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_131 = reshape(shape = var_891, x = linear_60)[name = tensor<string, []>("x_131")];
+            tensor<bool, []> attention_scores_41_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_41_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_41_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_41_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_56_perm_0 = const()[name = tensor<string, []>("transpose_56_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_57_perm_0 = const()[name = tensor<string, []>("transpose_57_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_65 = transpose(perm = transpose_57_perm_0, x = x_123)[name = tensor<string, []>("transpose_65")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_66 = transpose(perm = transpose_56_perm_0, x = x_131)[name = tensor<string, []>("transpose_66")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_41 = matmul(transpose_x = attention_scores_41_transpose_x_0, transpose_y = attention_scores_41_transpose_y_0, x = transpose_66, y = transpose_65)[name = tensor<string, []>("attention_scores_41")];
+            tensor<fp32, []> _inversed_attention_scores_43_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_43_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores_43 = mul(x = attention_scores_41, y = _inversed_attention_scores_43_y_0)[name = tensor<string, []>("_inversed_attention_scores_43")];
+            tensor<fp32, [1, 12, 128, 128]> input_231 = add(x = _inversed_attention_scores_43, y = attention_mask_1)[name = tensor<string, []>("input_231")];
+            tensor<fp32, [1, 12, 128, 128]> input_233 = softmax(axis = var_8, x = input_231)[name = tensor<string, []>("input_233")];
+            tensor<bool, []> context_layer_41_transpose_x_0 = const()[name = tensor<string, []>("context_layer_41_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_41_transpose_y_0 = const()[name = tensor<string, []>("context_layer_41_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_67 = transpose(perm = var_887, x = x_127)[name = tensor<string, []>("transpose_67")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_41 = matmul(transpose_x = context_layer_41_transpose_x_0, transpose_y = context_layer_41_transpose_y_0, x = input_233, y = transpose_67)[name = tensor<string, []>("context_layer_41")];
+            tensor<int32, [4]> var_903 = const()[name = tensor<string, []>("op_903"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_908 = const()[name = tensor<string, []>("op_908"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_64 = transpose(perm = var_903, x = context_layer_41)[name = tensor<string, []>("transpose_64")];
+            tensor<fp32, [1, 128, 384]> input_235 = reshape(shape = var_908, x = transpose_64)[name = tensor<string, []>("input_235")];
+            tensor<fp32, [1, 128, 384]> linear_63 = linear(bias = model_encoder_layer_10_attention_output_dense_bias, weight = model_encoder_layer_10_attention_output_dense_weight, x = input_235)[name = tensor<string, []>("linear_63")];
+            tensor<fp32, [1, 128, 384]> input_239 = add(x = linear_63, y = input_229)[name = tensor<string, []>("input_239")];
+            tensor<int32, [1]> input_241_axes_0 = const()[name = tensor<string, []>("input_241_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_241 = layer_norm(axes = input_241_axes_0, beta = model_encoder_layer_10_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_10_attention_output_LayerNorm_weight, x = input_239)[name = tensor<string, []>("input_241")];
+            tensor<fp32, [1, 128, 1536]> linear_64 = linear(bias = model_encoder_layer_10_intermediate_dense_bias, weight = model_encoder_layer_10_intermediate_dense_weight, x = input_241)[name = tensor<string, []>("linear_64")];
+            tensor<string, []> input_245_mode_0 = const()[name = tensor<string, []>("input_245_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_245 = gelu(mode = input_245_mode_0, x = linear_64)[name = tensor<string, []>("input_245")];
+            tensor<fp32, [1, 128, 384]> linear_65 = linear(bias = model_encoder_layer_10_output_dense_bias, weight = model_encoder_layer_10_output_dense_weight, x = input_245)[name = tensor<string, []>("linear_65")];
+            tensor<fp32, [1, 128, 384]> input_249 = add(x = linear_65, y = input_241)[name = tensor<string, []>("input_249")];
+            tensor<int32, [1]> input_251_axes_0 = const()[name = tensor<string, []>("input_251_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_251 = layer_norm(axes = input_251_axes_0, beta = model_encoder_layer_10_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_10_output_LayerNorm_weight, x = input_249)[name = tensor<string, []>("input_251")];
+            tensor<fp32, [1, 128, 384]> linear_66 = linear(bias = model_encoder_layer_11_attention_self_query_bias, weight = model_encoder_layer_11_attention_self_query_weight, x = input_251)[name = tensor<string, []>("linear_66")];
+            tensor<fp32, [1, 128, 384]> linear_67 = linear(bias = model_encoder_layer_11_attention_self_key_bias, weight = model_encoder_layer_11_attention_self_key_weight, x = input_251)[name = tensor<string, []>("linear_67")];
+            tensor<int32, [4]> var_953 = const()[name = tensor<string, []>("op_953"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_135 = reshape(shape = var_953, x = linear_67)[name = tensor<string, []>("x_135")];
+            tensor<fp32, [1, 128, 384]> linear_68 = linear(bias = model_encoder_layer_11_attention_self_value_bias, weight = model_encoder_layer_11_attention_self_value_weight, x = input_251)[name = tensor<string, []>("linear_68")];
+            tensor<int32, [4]> var_962 = const()[name = tensor<string, []>("op_962"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x_139 = reshape(shape = var_962, x = linear_68)[name = tensor<string, []>("x_139")];
+            tensor<int32, [4]> var_964 = const()[name = tensor<string, []>("op_964"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> var_968 = const()[name = tensor<string, []>("op_968"), val = tensor<int32, [4]>([1, 128, 12, 32])];
+            tensor<fp32, [1, 128, 12, 32]> x = reshape(shape = var_968, x = linear_66)[name = tensor<string, []>("x")];
+            tensor<bool, []> attention_scores_45_transpose_x_0 = const()[name = tensor<string, []>("attention_scores_45_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> attention_scores_45_transpose_y_0 = const()[name = tensor<string, []>("attention_scores_45_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<int32, [4]> transpose_58_perm_0 = const()[name = tensor<string, []>("transpose_58_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [4]> transpose_59_perm_0 = const()[name = tensor<string, []>("transpose_59_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
+            tensor<fp32, [1, 12, 32, 128]> transpose_61 = transpose(perm = transpose_59_perm_0, x = x_135)[name = tensor<string, []>("transpose_61")];
+            tensor<fp32, [1, 12, 128, 32]> transpose_62 = transpose(perm = transpose_58_perm_0, x = x)[name = tensor<string, []>("transpose_62")];
+            tensor<fp32, [1, 12, 128, 128]> attention_scores_45 = matmul(transpose_x = attention_scores_45_transpose_x_0, transpose_y = attention_scores_45_transpose_y_0, x = transpose_62, y = transpose_61)[name = tensor<string, []>("attention_scores_45")];
+            tensor<fp32, []> _inversed_attention_scores_y_0 = const()[name = tensor<string, []>("_inversed_attention_scores_y_0"), val = tensor<fp32, []>(0x1.6a09e6p-3)];
+            tensor<fp32, [1, 12, 128, 128]> _inversed_attention_scores = mul(x = attention_scores_45, y = _inversed_attention_scores_y_0)[name = tensor<string, []>("_inversed_attention_scores")];
+            tensor<fp32, [1, 12, 128, 128]> input_253 = add(x = _inversed_attention_scores, y = attention_mask_1)[name = tensor<string, []>("input_253")];
+            tensor<fp32, [1, 12, 128, 128]> input_255 = softmax(axis = var_8, x = input_253)[name = tensor<string, []>("input_255")];
+            tensor<bool, []> context_layer_45_transpose_x_0 = const()[name = tensor<string, []>("context_layer_45_transpose_x_0"), val = tensor<bool, []>(false)];
+            tensor<bool, []> context_layer_45_transpose_y_0 = const()[name = tensor<string, []>("context_layer_45_transpose_y_0"), val = tensor<bool, []>(false)];
+            tensor<fp32, [1, 12, 128, 32]> transpose_63 = transpose(perm = var_964, x = x_139)[name = tensor<string, []>("transpose_63")];
+            tensor<fp32, [1, 12, 128, 32]> context_layer_45 = matmul(transpose_x = context_layer_45_transpose_x_0, transpose_y = context_layer_45_transpose_y_0, x = input_255, y = transpose_63)[name = tensor<string, []>("context_layer_45")];
+            tensor<int32, [4]> var_980 = const()[name = tensor<string, []>("op_980"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+            tensor<int32, [3]> var_985 = const()[name = tensor<string, []>("op_985"), val = tensor<int32, [3]>([1, 128, 384])];
+            tensor<fp32, [1, 128, 12, 32]> transpose_60 = transpose(perm = var_980, x = context_layer_45)[name = tensor<string, []>("transpose_60")];
+            tensor<fp32, [1, 128, 384]> input_257 = reshape(shape = var_985, x = transpose_60)[name = tensor<string, []>("input_257")];
+            tensor<fp32, [1, 128, 384]> linear_69 = linear(bias = model_encoder_layer_11_attention_output_dense_bias, weight = model_encoder_layer_11_attention_output_dense_weight, x = input_257)[name = tensor<string, []>("linear_69")];
+            tensor<fp32, [1, 128, 384]> input_261 = add(x = linear_69, y = input_251)[name = tensor<string, []>("input_261")];
+            tensor<int32, [1]> input_263_axes_0 = const()[name = tensor<string, []>("input_263_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> input_263 = layer_norm(axes = input_263_axes_0, beta = model_encoder_layer_11_attention_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_11_attention_output_LayerNorm_weight, x = input_261)[name = tensor<string, []>("input_263")];
+            tensor<fp32, [1, 128, 1536]> linear_70 = linear(bias = model_encoder_layer_11_intermediate_dense_bias, weight = model_encoder_layer_11_intermediate_dense_weight, x = input_263)[name = tensor<string, []>("linear_70")];
+            tensor<string, []> input_267_mode_0 = const()[name = tensor<string, []>("input_267_mode_0"), val = tensor<string, []>("EXACT")];
+            tensor<fp32, [1, 128, 1536]> input_267 = gelu(mode = input_267_mode_0, x = linear_70)[name = tensor<string, []>("input_267")];
+            tensor<fp32, [1, 128, 384]> linear_71 = linear(bias = model_encoder_layer_11_output_dense_bias, weight = model_encoder_layer_11_output_dense_weight, x = input_267)[name = tensor<string, []>("linear_71")];
+            tensor<fp32, [1, 128, 384]> input_271 = add(x = linear_71, y = input_263)[name = tensor<string, []>("input_271")];
+            tensor<int32, [1]> hidden_states_axes_0 = const()[name = tensor<string, []>("hidden_states_axes_0"), val = tensor<int32, [1]>([-1])];
+            tensor<fp32, [1, 128, 384]> last_hidden_state = layer_norm(axes = hidden_states_axes_0, beta = model_encoder_layer_11_output_LayerNorm_bias, epsilon = var_10, gamma = model_encoder_layer_11_output_LayerNorm_weight, x = input_271)[name = tensor<string, []>("hidden_states")];
+            tensor<int32, [3]> input_273_begin_0 = const()[name = tensor<string, []>("input_273_begin_0"), val = tensor<int32, [3]>([0, 0, 0])];
+            tensor<int32, [3]> input_273_end_0 = const()[name = tensor<string, []>("input_273_end_0"), val = tensor<int32, [3]>([1, 1, 384])];
+            tensor<bool, [3]> input_273_end_mask_0 = const()[name = tensor<string, []>("input_273_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
+            tensor<bool, [3]> input_273_squeeze_mask_0 = const()[name = tensor<string, []>("input_273_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
+            tensor<fp32, [1, 384]> input_273 = slice_by_index(begin = input_273_begin_0, end = input_273_end_0, end_mask = input_273_end_mask_0, squeeze_mask = input_273_squeeze_mask_0, x = last_hidden_state)[name = tensor<string, []>("input_273")];
+            tensor<fp32, [1, 384]> linear_72 = linear(bias = model_pooler_dense_bias, weight = model_pooler_dense_weight, x = input_273)[name = tensor<string, []>("linear_72")];
+            tensor<fp32, [1, 384]> pooler_output = tanh(x = linear_72)[name = tensor<string, []>("op_1020")];
+        } -> (last_hidden_state, pooler_output);
+}
\ No newline at end of file
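
Editor's note (not part of the generated model.mil above): the program ends by layer-normalizing the layer-11 output into last_hidden_state ([1, 128, 384]), slicing out the first token, and feeding it through the pooler dense layer and tanh to produce pooler_output ([1, 384]); the recurring scale constant 0x1.6a09e6p-3 is approximately 0.17678, i.e. 1/sqrt(32), the per-head scaling for the 32-dimensional attention heads. Below is a minimal, hedged Swift sketch of how the compiled float32_model.mlmodelc added by this patch might be invoked through CoreML. The feature names and shapes come from the model description in this patch; the Bundle.module resource lookup and the helper name are assumptions about how the model is packaged, not part of this change.

import CoreML

// Hypothetical helper showing one way to run the gte-small CoreML model added in
// this patch. input_ids and attention_mask are token-level inputs (sequence
// length 128); the model returns last_hidden_state [1, 128, 384] and
// pooler_output [1, 384]. The Bundle.module lookup is an assumption about how
// the .mlmodelc is bundled as a package resource.
func encode(inputIDs: MLMultiArray, attentionMask: MLMultiArray) throws -> (hiddenStates: MLMultiArray?, pooled: MLMultiArray?) {
    guard let url = Bundle.module.url(forResource: "float32_model", withExtension: "mlmodelc") else {
        fatalError("compiled model not found in the package resources")
    }
    let model = try MLModel(contentsOf: url, configuration: MLModelConfiguration())
    let features = try MLDictionaryFeatureProvider(dictionary: [
        "input_ids": MLFeatureValue(multiArray: inputIDs),
        "attention_mask": MLFeatureValue(multiArray: attentionMask)
    ])
    let output = try model.prediction(from: features)
    return (output.featureValue(for: "last_hidden_state")?.multiArrayValue,
            output.featureValue(for: "pooler_output")?.multiArrayValue)
}
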
diff --git a/Sources/SwiftNLP/Models/float32_model.mlmodelc/weights/weight.bin b/Sources/SwiftNLP/Models/float32_model.mlmodelc/weights/weight.bin
new file mode 100644
index 0000000000000000000000000000000000000000..71d86e38eb7ae4707228b8af0888c01ab9b7c4a5
Binary files /dev/null and b/Sources/SwiftNLP/Models/float32_model.mlmodelc/weights/weight.bin differ
diff --git a/Sources/SwiftNLP/Resources/float32_model.mlpackage/Data/com.apple.CoreML/model.mlmodel b/Sources/SwiftNLP/Resources/float32_model.mlpackage/Data/com.apple.CoreML/model.mlmodel
new file mode 100644
index 0000000000000000000000000000000000000000..a23498c8857990e3a536ab50919b8358fe2fd143
--- /dev/null
+++ b/Sources/SwiftNLP/Resources/float32_model.mlpackage/Data/com.apple.CoreML/model.mlmodel
@@ -0,0 +1,11270 @@
[Binary CoreML protobuf payload omitted: the hunk adds 11,270 lines of non-text content that does not render as a readable diff. Recoverable metadata from the payload:
 - Model: 'thenlper/gte-small (feature-extraction)', architecture BertModel, framework pytorch, precision float32.
 - Export chain: torch==2.1.0, transformers 4.28.1, coremltools 7.1 (TorchScript source dialect), Hugging Face exporters.
 - Inputs: input_ids ("Indices of input sequence tokens in the vocabulary") and attention_mask ("Mask to avoid performing attention on padding token indices (1 = not masked, 0 = masked)").
 - Outputs: last_hidden_state ("Sequence of hidden-states at the output of the last layer of the model") and pooler_output ("Last layer hidden-state of the first token of the sequence").
 - The payload then declares the embedding and per-encoder-layer weight/bias constants (model_embeddings_*, model_encoder_layer_N_*), each resolved from @model_path/weights/weight.bin.]
+val6
+
+€*%
+@model_path/weights/weight.bin€–ƒ%ß
+constH
+7model_encoder_layer_4_attention_output_LayerNorm_weight
+
+€*M
+nameE
+=
+;"9
+7model_encoder_layer_4_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀ¢ƒ%Ë
+const>
+-model_encoder_layer_4_intermediate_dense_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_4_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€¯ƒ%Ý
+constG
+/model_encoder_layer_4_intermediate_dense_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_4_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ߃%¿
+const8
+'model_encoder_layer_4_output_dense_bias
+
+€*=
+name5
+-
++")
+'model_encoder_layer_4_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€à“&Ñ
+constA
+)model_encoder_layer_4_output_dense_weight
+
+€
+€*?
+name7
+/
+-"+
+)model_encoder_layer_4_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀì“&Ç
+const<
++model_encoder_layer_4_output_LayerNorm_bias
+
+€*A
+name9
+1
+/"-
++model_encoder_layer_4_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€í£'Ë
+const>
+-model_encoder_layer_4_output_LayerNorm_weight
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_4_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀù£'Ï
+const@
+/model_encoder_layer_5_attention_self_query_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_5_attention_self_query_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€†¤'á
+constI
+1model_encoder_layer_5_attention_self_query_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_5_attention_self_query_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ’¤'Ë
+const>
+-model_encoder_layer_5_attention_self_key_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_5_attention_self_key_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€“È'Ý
+constG
+/model_encoder_layer_5_attention_self_key_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_5_attention_self_key_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀŸÈ'Ï
+const@
+/model_encoder_layer_5_attention_self_value_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_5_attention_self_value_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ ì'á
+constI
+1model_encoder_layer_5_attention_self_value_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_5_attention_self_value_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ¬ì'Ó
+constB
+1model_encoder_layer_5_attention_output_dense_bias
+
+€*G
+name?
+7
+5"3
+1model_encoder_layer_5_attention_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€­(å
+constK
+3model_encoder_layer_5_attention_output_dense_weight
+
+€
+€*I
+nameA
+9
+7"5
+3model_encoder_layer_5_attention_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ¹(Û
+constF
+5model_encoder_layer_5_attention_output_LayerNorm_bias
+
+€*K
+nameC
+;
+9"7
+5model_encoder_layer_5_attention_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€º´(ß
+constH
+7model_encoder_layer_5_attention_output_LayerNorm_weight
+
+€*M
+nameE
+=
+;"9
+7model_encoder_layer_5_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀÆ´(Ë
+const>
+-model_encoder_layer_5_intermediate_dense_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_5_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Ó´(Ý
+constG
+/model_encoder_layer_5_intermediate_dense_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_5_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀƒµ(¿
+const8
+'model_encoder_layer_5_output_dense_bias
+
+€*=
+name5
+-
++")
+'model_encoder_layer_5_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€„Å)Ñ
+constA
+)model_encoder_layer_5_output_dense_weight
+
+€
+€*?
+name7
+/
+-"+
+)model_encoder_layer_5_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÅ)Ç
+const<
++model_encoder_layer_5_output_LayerNorm_bias
+
+€*A
+name9
+1
+/"-
++model_encoder_layer_5_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€‘Õ*Ë
+const>
+-model_encoder_layer_5_output_LayerNorm_weight
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_5_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀÕ*Ï
+const@
+/model_encoder_layer_6_attention_self_query_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_6_attention_self_query_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ªÕ*á
+constI
+1model_encoder_layer_6_attention_self_query_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_6_attention_self_query_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ¶Õ*Ë
+const>
+-model_encoder_layer_6_attention_self_key_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_6_attention_self_key_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€·ù*Ý
+constG
+/model_encoder_layer_6_attention_self_key_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_6_attention_self_key_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÃù*Ï
+const@
+/model_encoder_layer_6_attention_self_value_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_6_attention_self_value_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Ä+á
+constI
+1model_encoder_layer_6_attention_self_value_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_6_attention_self_value_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀН+Ó
+constB
+1model_encoder_layer_6_attention_output_dense_bias
+
+€*G
+name?
+7
+5"3
+1model_encoder_layer_6_attention_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ÑÁ+å
+constK
+3model_encoder_layer_6_attention_output_dense_weight
+
+€
+€*I
+nameA
+9
+7"5
+3model_encoder_layer_6_attention_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÝÁ+Û
+constF
+5model_encoder_layer_6_attention_output_LayerNorm_bias
+
+€*K
+nameC
+;
+9"7
+5model_encoder_layer_6_attention_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Þå+ß
+constH
+7model_encoder_layer_6_attention_output_LayerNorm_weight
+
+€*M
+nameE
+=
+;"9
+7model_encoder_layer_6_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀêå+Ë
+const>
+-model_encoder_layer_6_intermediate_dense_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_6_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€÷å+Ý
+constG
+/model_encoder_layer_6_intermediate_dense_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_6_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ§æ+¿
+const8
+'model_encoder_layer_6_output_dense_bias
+
+€*=
+name5
+-
++")
+'model_encoder_layer_6_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€¨ö,Ñ
+constA
+)model_encoder_layer_6_output_dense_weight
+
+€
+€*?
+name7
+/
+-"+
+)model_encoder_layer_6_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ´ö,Ç
+const<
++model_encoder_layer_6_output_LayerNorm_bias
+
+€*A
+name9
+1
+/"-
++model_encoder_layer_6_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€µ†.Ë
+const>
+-model_encoder_layer_6_output_LayerNorm_weight
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_6_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀÁ†.Ï
+const@
+/model_encoder_layer_7_attention_self_query_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_7_attention_self_query_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Î†.á
+constI
+1model_encoder_layer_7_attention_self_query_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_7_attention_self_query_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÚ†.Ë
+const>
+-model_encoder_layer_7_attention_self_key_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_7_attention_self_key_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Ûª.Ý
+constG
+/model_encoder_layer_7_attention_self_key_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_7_attention_self_key_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀçª.Ï
+const@
+/model_encoder_layer_7_attention_self_value_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_7_attention_self_value_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€èÎ.á
+constI
+1model_encoder_layer_7_attention_self_value_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_7_attention_self_value_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀôÎ.Ó
+constB
+1model_encoder_layer_7_attention_output_dense_bias
+
+€*G
+name?
+7
+5"3
+1model_encoder_layer_7_attention_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€õò.å
+constK
+3model_encoder_layer_7_attention_output_dense_weight
+
+€
+€*I
+nameA
+9
+7"5
+3model_encoder_layer_7_attention_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀó.Û
+constF
+5model_encoder_layer_7_attention_output_LayerNorm_bias
+
+€*K
+nameC
+;
+9"7
+5model_encoder_layer_7_attention_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€‚—/ß
+constH
+7model_encoder_layer_7_attention_output_LayerNorm_weight
+
+€*M
+nameE
+=
+;"9
+7model_encoder_layer_7_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀŽ—/Ë
+const>
+-model_encoder_layer_7_intermediate_dense_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_7_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€›—/Ý
+constG
+/model_encoder_layer_7_intermediate_dense_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_7_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀË—/¿
+const8
+'model_encoder_layer_7_output_dense_bias
+
+€*=
+name5
+-
++")
+'model_encoder_layer_7_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Ì§0Ñ
+constA
+)model_encoder_layer_7_output_dense_weight
+
+€
+€*?
+name7
+/
+-"+
+)model_encoder_layer_7_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀا0Ç
+const<
++model_encoder_layer_7_output_LayerNorm_bias
+
+€*A
+name9
+1
+/"-
++model_encoder_layer_7_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Ù·1Ë
+const>
+-model_encoder_layer_7_output_LayerNorm_weight
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_7_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀå·1Ï
+const@
+/model_encoder_layer_8_attention_self_query_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_8_attention_self_query_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ò·1á
+constI
+1model_encoder_layer_8_attention_self_query_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_8_attention_self_query_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀþ·1Ë
+const>
+-model_encoder_layer_8_attention_self_key_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_8_attention_self_key_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ÿÛ1Ý
+constG
+/model_encoder_layer_8_attention_self_key_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_8_attention_self_key_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ‹Ü1Ï
+const@
+/model_encoder_layer_8_attention_self_value_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_8_attention_self_value_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Œ€2á
+constI
+1model_encoder_layer_8_attention_self_value_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_8_attention_self_value_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ˜€2Ó
+constB
+1model_encoder_layer_8_attention_output_dense_bias
+
+€*G
+name?
+7
+5"3
+1model_encoder_layer_8_attention_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€™¤2å
+constK
+3model_encoder_layer_8_attention_output_dense_weight
+
+€
+€*I
+nameA
+9
+7"5
+3model_encoder_layer_8_attention_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ¥¤2Û
+constF
+5model_encoder_layer_8_attention_output_LayerNorm_bias
+
+€*K
+nameC
+;
+9"7
+5model_encoder_layer_8_attention_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€¦È2ß
+constH
+7model_encoder_layer_8_attention_output_LayerNorm_weight
+
+€*M
+nameE
+=
+;"9
+7model_encoder_layer_8_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀ²È2Ë
+const>
+-model_encoder_layer_8_intermediate_dense_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_8_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€¿È2Ý
+constG
+/model_encoder_layer_8_intermediate_dense_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_8_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀïÈ2¿
+const8
+'model_encoder_layer_8_output_dense_bias
+
+€*=
+name5
+-
++")
+'model_encoder_layer_8_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ðØ3Ñ
+constA
+)model_encoder_layer_8_output_dense_weight
+
+€
+€*?
+name7
+/
+-"+
+)model_encoder_layer_8_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀüØ3Ç
+const<
++model_encoder_layer_8_output_LayerNorm_bias
+
+€*A
+name9
+1
+/"-
++model_encoder_layer_8_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ýè4Ë
+const>
+-model_encoder_layer_8_output_LayerNorm_weight
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_8_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀ‰é4Ï
+const@
+/model_encoder_layer_9_attention_self_query_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_9_attention_self_query_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€–é4á
+constI
+1model_encoder_layer_9_attention_self_query_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_9_attention_self_query_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ¢é4Ë
+const>
+-model_encoder_layer_9_attention_self_key_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_9_attention_self_key_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€£5Ý
+constG
+/model_encoder_layer_9_attention_self_key_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_9_attention_self_key_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ¯5Ï
+const@
+/model_encoder_layer_9_attention_self_value_bias
+
+€*E
+name=
+5
+3"1
+/model_encoder_layer_9_attention_self_value_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€°±5á
+constI
+1model_encoder_layer_9_attention_self_value_weight
+
+€
+€*G
+name?
+7
+5"3
+1model_encoder_layer_9_attention_self_value_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ¼±5Ó
+constB
+1model_encoder_layer_9_attention_output_dense_bias
+
+€*G
+name?
+7
+5"3
+1model_encoder_layer_9_attention_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€½Õ5å
+constK
+3model_encoder_layer_9_attention_output_dense_weight
+
+€
+€*I
+nameA
+9
+7"5
+3model_encoder_layer_9_attention_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÉÕ5Û
+constF
+5model_encoder_layer_9_attention_output_LayerNorm_bias
+
+€*K
+nameC
+;
+9"7
+5model_encoder_layer_9_attention_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Êù5ß
+constH
+7model_encoder_layer_9_attention_output_LayerNorm_weight
+
+€*M
+nameE
+=
+;"9
+7model_encoder_layer_9_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀÖù5Ë
+const>
+-model_encoder_layer_9_intermediate_dense_bias
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_9_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ãù5Ý
+constG
+/model_encoder_layer_9_intermediate_dense_weight
+
+€
+€*E
+name=
+5
+3"1
+/model_encoder_layer_9_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ“ú5¿
+const8
+'model_encoder_layer_9_output_dense_bias
+
+€*=
+name5
+-
++")
+'model_encoder_layer_9_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€”Š7Ñ
+constA
+)model_encoder_layer_9_output_dense_weight
+
+€
+€*?
+name7
+/
+-"+
+)model_encoder_layer_9_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ Š7Ç
+const<
++model_encoder_layer_9_output_LayerNorm_bias
+
+€*A
+name9
+1
+/"-
++model_encoder_layer_9_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€¡š8Ë
+const>
+-model_encoder_layer_9_output_LayerNorm_weight
+
+€*C
+name;
+3
+1"/
+-model_encoder_layer_9_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀ­š8Ñ
+constA
+0model_encoder_layer_10_attention_self_query_bias
+
+€*F
+name>
+6
+4"2
+0model_encoder_layer_10_attention_self_query_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ºš8ã
+constJ
+2model_encoder_layer_10_attention_self_query_weight
+
+€
+€*H
+name@
+8
+6"4
+2model_encoder_layer_10_attention_self_query_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÆš8Í
+const?
+.model_encoder_layer_10_attention_self_key_bias
+
+€*D
+name<
+4
+2"0
+.model_encoder_layer_10_attention_self_key_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Ç¾8ß
+constH
+0model_encoder_layer_10_attention_self_key_weight
+
+€
+€*F
+name>
+6
+4"2
+0model_encoder_layer_10_attention_self_key_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÓ¾8Ñ
+constA
+0model_encoder_layer_10_attention_self_value_bias
+
+€*F
+name>
+6
+4"2
+0model_encoder_layer_10_attention_self_value_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Ôâ8ã
+constJ
+2model_encoder_layer_10_attention_self_value_weight
+
+€
+€*H
+name@
+8
+6"4
+2model_encoder_layer_10_attention_self_value_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀàâ8Õ
+constC
+2model_encoder_layer_10_attention_output_dense_bias
+
+€*H
+name@
+8
+6"4
+2model_encoder_layer_10_attention_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€á†9ç
+constL
+4model_encoder_layer_10_attention_output_dense_weight
+
+€
+€*J
+nameB
+:
+8"6
+4model_encoder_layer_10_attention_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀí†9Ý
+constG
+6model_encoder_layer_10_attention_output_LayerNorm_bias
+
+€*L
+nameD
+<
+:"8
+6model_encoder_layer_10_attention_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€îª9á
+constI
+8model_encoder_layer_10_attention_output_LayerNorm_weight
+
+€*N
+nameF
+>
+<":
+8model_encoder_layer_10_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀúª9Í
+const?
+.model_encoder_layer_10_intermediate_dense_bias
+
+€*D
+name<
+4
+2"0
+.model_encoder_layer_10_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€‡«9ß
+constH
+0model_encoder_layer_10_intermediate_dense_weight
+
+€
+€*F
+name>
+6
+4"2
+0model_encoder_layer_10_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ·«9Á
+const9
+(model_encoder_layer_10_output_dense_bias
+
+€*>
+name6
+.
+,"*
+(model_encoder_layer_10_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€¸»:Ó
+constB
+*model_encoder_layer_10_output_dense_weight
+
+€
+€*@
+name8
+0
+.",
+*model_encoder_layer_10_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÄ»:É
+const=
+,model_encoder_layer_10_output_LayerNorm_bias
+
+€*B
+name:
+2
+0".
+,model_encoder_layer_10_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ÅË;Í
+const?
+.model_encoder_layer_10_output_LayerNorm_weight
+
+€*D
+name<
+4
+2"0
+.model_encoder_layer_10_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀÑË;Ñ
+constA
+0model_encoder_layer_11_attention_self_query_bias
+
+€*F
+name>
+6
+4"2
+0model_encoder_layer_11_attention_self_query_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ÞË;ã
+constJ
+2model_encoder_layer_11_attention_self_query_weight
+
+€
+€*H
+name@
+8
+6"4
+2model_encoder_layer_11_attention_self_query_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀêË;Í
+const?
+.model_encoder_layer_11_attention_self_key_bias
+
+€*D
+name<
+4
+2"0
+.model_encoder_layer_11_attention_self_key_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ëï;ß
+constH
+0model_encoder_layer_11_attention_self_key_weight
+
+€
+€*F
+name>
+6
+4"2
+0model_encoder_layer_11_attention_self_key_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ÷ï;Ñ
+constA
+0model_encoder_layer_11_attention_self_value_bias
+
+€*F
+name>
+6
+4"2
+0model_encoder_layer_11_attention_self_value_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€ø“<ã
+constJ
+2model_encoder_layer_11_attention_self_value_weight
+
+€
+€*H
+name@
+8
+6"4
+2model_encoder_layer_11_attention_self_value_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ„”<Õ
+constC
+2model_encoder_layer_11_attention_output_dense_bias
+
+€*H
+name@
+8
+6"4
+2model_encoder_layer_11_attention_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€…¸<ç
+constL
+4model_encoder_layer_11_attention_output_dense_weight
+
+€
+€*J
+nameB
+:
+8"6
+4model_encoder_layer_11_attention_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀ‘¸<Ý
+constG
+6model_encoder_layer_11_attention_output_LayerNorm_bias
+
+€*L
+nameD
+<
+:"8
+6model_encoder_layer_11_attention_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€’Ü<á
+constI
+8model_encoder_layer_11_attention_output_LayerNorm_weight
+
+€*N
+nameF
+>
+<":
+8model_encoder_layer_11_attention_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀžÜ<Í
+const?
+.model_encoder_layer_11_intermediate_dense_bias
+
+€*D
+name<
+4
+2"0
+.model_encoder_layer_11_intermediate_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€«Ü<ß
+constH
+0model_encoder_layer_11_intermediate_dense_weight
+
+€
+€*F
+name>
+6
+4"2
+0model_encoder_layer_11_intermediate_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀÛÜ<Á
+const9
+(model_encoder_layer_11_output_dense_bias
+
+€*>
+name6
+.
+,"*
+(model_encoder_layer_11_output_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€Üì=Ó
+constB
+*model_encoder_layer_11_output_dense_weight
+
+€
+€*@
+name8
+0
+.",
+*model_encoder_layer_11_output_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀèì=É
+const=
+,model_encoder_layer_11_output_LayerNorm_bias
+
+€*B
+name:
+2
+0".
+,model_encoder_layer_11_output_LayerNorm_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€éü>Í
+const?
+.model_encoder_layer_11_output_LayerNorm_weight
+
+€*D
+name<
+4
+2"0
+.model_encoder_layer_11_output_LayerNorm_weight*=
+val6
+
+€*%
+@model_path/weights/weight.binÀõü>Ÿ
+const(
+model_pooler_dense_bias
+
+€*-
+name%
+
+"
+model_pooler_dense_bias*=
+val6
+
+€*%
+@model_path/weights/weight.bin€‚ý>±
+const1
+model_pooler_dense_weight
+
+€
+€*/
+name'
+
+"
+model_pooler_dense_weight*D
+val=
+
+€
+€*%
+@model_path/weights/weight.binÀŽý>S
+const
+var_8
+*
+name
+
+
+"
+op_8*
+val
+
+
+
+ÿÿÿÿÿÿÿÿÿO
+const
+var_10
+*
+name
+
+	"
+op_10*
+val
+
+
+
+
+̼Œ+O
+const
+var_13
+*
+name
+
+	"
+op_13*
+val
+
+
+
+
+��€?j
+const
+
var_34_axes_0
+
+
+*"
+name
+
+"
+op_34_axes_0*
+val
+
+
+
+
+ƒ
+expand_dims
+x
+
+attention_mask
+axes
+
+
var_34_axes_0#
+var_34
+
+
+
+€*
+name
+
+	"
+op_34j
+const
+
var_35_axes_0
+
+
+*"
+name
+
+"
+op_35_axes_0*
+val
+
+
+
+
+
+expand_dims
+x
+
+
+var_34
+axes
+
+
var_35_axes_0)
+var_35
+
+
+
+
+€*
+name
+
+	"
+op_35_
+const
+var_37_dtype_0
+*#
+name
+
+"
+
op_37_dtype_0*
+val
+
+
+"
+fp32
+cast
+x
+
+
+var_35
+dtype
+
+var_37_dtype_0*
+cast_75
+
+
+
+
+€*
+name
+
+"	
+cast_75p
+sub
+x
+
+
+var_13
+y
+	
+cast_75)
+var_38
+
+
+
+
+€*
+name
+
+	"
+op_38O
+const
+var_39
+*
+name
+
+	"
+op_39*
+val
+
+
+
+
+ÿÿÿ‚
+mul
+x
+
+
+var_38
+y
+
+
+var_393
+attention_mask_1
+
+
+
+
+€*$
+name
+
+"
+attention_maski
+const
+inputs_embeds_axis_0
+**
+name"
+
+"
+inputs_embeds_axis_0*
+val
+
+
+�È
+gather0
+x+
+)
+'model_embeddings_word_embeddings_weight
+indices
+
+	input_ids 
+axis
+
+inputs_embeds_axis_0+
+
inputs_embeds
+
+
+€
+€*#
+name
+
+"
+
inputs_embeds¹
+const5
+token_type_embeddings_1
+
+
+€
+€*-
+name%
+
+"
+token_type_embeddings_1*J
+valC
+
+
+€
+€*%
+@model_path/weights/weight.bin€¡?
+add
+x
+
+
inputs_embeds 
+y
+
+token_type_embeddings_1*
+embeddings_1
+
+
+€
+€*"
+name
+
+"
+embeddings_1µ
+const3
+position_embeddings_1
+
+
+€
+€*+
+name#
+
+"
+position_embeddings_1*J
+valC
+
+
+€
+€*%
+@model_path/weights/weight.binÀ­?‚
+add
+x
+
+embeddings_1
+y
+
+position_embeddings_1%
+input_5
+
+
+€
+€*
+name
+
+"	
+input_5v
+const
+input_7_axes_0
+
+
+*$
+name
+
+"
+input_7_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿô
+
+layer_norm
+x
+	
+input_5
+axes
+
+input_7_axes_0.
+gamma%
+#
+!model_embeddings_LayerNorm_weight+
+beta#
+!
+model_embeddings_LayerNorm_bias
+epsilon
+
+
+var_10%
+input_7
+
+
+€
+€*
+name
+
+"	
+input_7à
+linear
+x
+	
+input_7?
+weight5
+3
+1model_encoder_layer_0_attention_self_query_weight;
+bias3
+1
+/model_encoder_layer_0_attention_self_query_bias&
+linear_0
+
+
+€
+€*
+name
+
+"
+
+linear_0Ü
+linear
+x
+	
+input_7=
+weight3
+1
+/model_encoder_layer_0_attention_self_key_weight9
+bias1
+/
+-model_encoder_layer_0_attention_self_key_bias&
+linear_1
+
+
+€
+€*
+name
+
+"
+
+linear_1b
+const
+var_106
+
+
+*
+name
+
+
+"
+op_106*"
+val
+
+
+
+	
+€ u
+reshape
+x
+
+
+linear_1
+shape
+	
+var_106&
+x_3
+
+
+€
+
+ *
+name
+	
+"
+x_3à
+linear
+x
+	
+input_7?
+weight5
+3
+1model_encoder_layer_0_attention_self_value_weight;
+bias3
+1
+/model_encoder_layer_0_attention_self_value_bias&
+linear_2
+
+
+€
+€*
+name
+
+"
+
+linear_2b
+const
+var_115
+
+
+*
+name
+
+
+"
+op_115*"
+val
+
+
+
+	
+€ u
+reshape
+x
+
+
+linear_2
+shape
+	
+var_115&
+x_7
+
+
+€
+
+ *
+name
+	
+"
+x_7a
+const
+var_117
+
+
+*
+name
+
+
+"
+op_117*!
+val
+
+
+
+
+
+�b
+const
+var_121
+
+
+*
+name
+
+
+"
+op_121*"
+val
+
+
+
+	
+€ w
+reshape
+x
+
+
+linear_0
+shape
+	
+var_121'
+x_11
+
+
+€
+
+ *
+name
+
+
+"
+x_11
+const(
+ attention_scores_1_transpose_x_0
+*6
+name.
+&
+$""
+ attention_scores_1_transpose_x_0*
+val
+
+
+�
+const(
+ attention_scores_1_transpose_y_0
+*6
+name.
+&
+$""
+ attention_scores_1_transpose_y_0*
+val
+
+
+�z
+const#
+transpose_36_perm_0
+
+
+*)
+name!
+
+"
+transpose_36_perm_0*!
+val
+
+
+
+
+
+�z
+const#
+transpose_37_perm_0
+
+
+*)
+name!
+
+"
+transpose_37_perm_0*!
+val
+
+
+
+
+
+�‘
+	transpose
+x
+
+x_3
+perm
+
+transpose_37_perm_00
+
transpose_105
+
+
+
+ 
+€*#
+name
+
+"
+
transpose_105’
+	transpose
+x
+
+x_11
+perm
+
+transpose_36_perm_00
+
transpose_106
+
+
+
+€
+ *#
+name
+
+"
+
transpose_106„
+matmul
+x
+
+
transpose_106
+y
+
+
transpose_1053
+transpose_x$
+"
+ attention_scores_1_transpose_x_03
+transpose_y$
+"
+ attention_scores_1_transpose_y_06
+attention_scores_1 
+
+
+
+€
+€*(
+name 
+
+"
+attention_scores_1„
+const(
+ _inversed_attention_scores_3_y_0
+*6
+name.
+&
+$""
+ _inversed_attention_scores_3_y_0*
+val
+
+
+
+
+ó5>Ã
+mul
+x
+
+attention_scores_1)
+y$
+"
+ _inversed_attention_scores_3_y_0@
+_inversed_attention_scores_3 
+
+
+
+€
+€*2
+name*
+"
+ "
+_inversed_attention_scores_3•
+add%
+x 
+
+_inversed_attention_scores_3
+y
+
+attention_mask_1,
+input_11 
+
+
+
+€
+€*
+name
+
+"
+
+input_11}
+softmax
+x
+
+
+input_11
+axis	
+
+var_8,
+input_13 
+
+
+
+€
+€*
+name
+
+"
+
+input_13{
+const%
+context_layer_1_transpose_x_0
+*3
+name+
+#
+!"
+context_layer_1_transpose_x_0*
+val
+
+
+�{
+const%
+context_layer_1_transpose_y_0
+*3
+name+
+#
+!"
+context_layer_1_transpose_y_0*
+val
+
+
+�…
+	transpose
+x
+
+x_7
+perm
+	
+var_1170
+
transpose_107
+
+
+
+€
+ *#
+name
+
+"
+
transpose_107ò
+matmul
+x
+
+
+input_13
+y
+
+
transpose_1070
+transpose_x!
+
+context_layer_1_transpose_x_00
+transpose_y!
+
+context_layer_1_transpose_y_02
+context_layer_1
+
+
+
+€
+ *%
+name
+
+"
+context_layer_1a
+const
+var_133
+
+
+*
+name
+
+
+"
+op_133*!
+val
+
+
+
+
+
+�b
+const
+var_138
+
+
+*
+name
+
+
+"
+op_138*"
+val
+
+
+
+	
+€€‘
+	transpose
+x
+
+context_layer_1
+perm
+	
+var_1330
+
transpose_104
+
+
+€
+
+ *#
+name
+
+"
+
transpose_104
+reshape
+x
+
+
transpose_104
+shape
+	
+var_138&
+input_15
+
+
+€
+€*
+name
+
+"
+
+input_15å
+linear
+x
+
+
+input_15A
+weight7
+5
+3model_encoder_layer_0_attention_output_dense_weight=
+bias5
+3
+1model_encoder_layer_0_attention_output_dense_bias&
+linear_3
+
+
+€
+€*
+name
+
+"
+
+linear_3r
+add
+x
+
+
+linear_3
+y
+	
+input_7&
+input_19
+
+
+€
+€*
+name
+
+"
+
+input_19x
+const
+input_21_axes_0
+
+
+*%
+name
+
+"
+input_21_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ¤
+
+layer_norm
+x
+
+
+input_19
+axes
+
+input_21_axes_0D
+gamma;
+9
+7model_encoder_layer_0_attention_output_LayerNorm_weightA
+beta9
+7
+5model_encoder_layer_0_attention_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_21
+
+
+€
+€*
+name
+
+"
+
+input_21Ý
+linear
+x
+
+
+input_21=
+weight3
+1
+/model_encoder_layer_0_intermediate_dense_weight9
+bias1
+/
+-model_encoder_layer_0_intermediate_dense_bias&
+linear_4
+
+
+€
+€*
+name
+
+"
+
+linear_4c
+const
+input_25_mode_0
+*%
+name
+
+"
+input_25_mode_0*
+val
+
+	"
+EXACT~
+gelu
+x
+
+
+linear_4
+mode
+
+input_25_mode_0&
+input_25
+
+
+€
+€*
+name
+
+"
+
+input_25Ñ
+linear
+x
+
+
+input_257
+weight-
++
+)model_encoder_layer_0_output_dense_weight3
+bias+
+)
+'model_encoder_layer_0_output_dense_bias&
+linear_5
+
+
+€
+€*
+name
+
+"
+
+linear_5s
+add
+x
+
+
+linear_5
+y
+
+
+input_21&
+input_29
+
+
+€
+€*
+name
+
+"
+
+input_29x
+const
+input_31_axes_0
+
+
+*%
+name
+
+"
+input_31_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ
+
+layer_norm
+x
+
+
+input_29
+axes
+
+input_31_axes_0:
+gamma1
+/
+-model_encoder_layer_0_output_LayerNorm_weight7
+beta/
+-
++model_encoder_layer_0_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_31
+
+
+€
+€*
+name
+
+"
+
+input_31á
+linear
+x
+
+
+input_31?
+weight5
+3
+1model_encoder_layer_1_attention_self_query_weight;
+bias3
+1
+/model_encoder_layer_1_attention_self_query_bias&
+linear_6
+
+
+€
+€*
+name
+
+"
+
+linear_6Ý
+linear
+x
+
+
+input_31=
+weight3
+1
+/model_encoder_layer_1_attention_self_key_weight9
+bias1
+/
+-model_encoder_layer_1_attention_self_key_bias&
+linear_7
+
+
+€
+€*
+name
+
+"
+
+linear_7b
+const
+var_183
+
+
+*
+name
+
+
+"
+op_183*"
+val
+
+
+
+	
+€ w
+reshape
+x
+
+
+linear_7
+shape
+	
+var_183'
+x_15
+
+
+€
+
+ *
+name
+
+
+"
+x_15á
+linear
+x
+
+
+input_31?
+weight5
+3
+1model_encoder_layer_1_attention_self_value_weight;
+bias3
+1
+/model_encoder_layer_1_attention_self_value_bias&
+linear_8
+
+
+€
+€*
+name
+
+"
+
+linear_8b
+const
+var_192
+
+
+*
+name
+
+
+"
+op_192*"
+val
+
+
+
+	
+€ w
+reshape
+x
+
+
+linear_8
+shape
+	
+var_192'
+x_19
+
+
+€
+
+ *
+name
+
+
+"
+x_19a
+const
+var_194
+
+
+*
+name
+
+
+"
+op_194*!
+val
+
+
+
+
+
+�b
+const
+var_198
+
+
+*
+name
+
+
+"
+op_198*"
+val
+
+
+
+	
+€ w
+reshape
+x
+
+
+linear_6
+shape
+	
+var_198'
+x_23
+
+
+€
+
+ *
+name
+
+
+"
+x_23
+const(
+ attention_scores_5_transpose_x_0
+*6
+name.
+&
+$""
+ attention_scores_5_transpose_x_0*
+val
+
+
+�
+const(
+ attention_scores_5_transpose_y_0
+*6
+name.
+&
+$""
+ attention_scores_5_transpose_y_0*
+val
+
+
+�z
+const#
+transpose_38_perm_0
+
+
+*)
+name!
+
+"
+transpose_38_perm_0*!
+val
+
+
+
+
+
+�z
+const#
+transpose_39_perm_0
+
+
+*)
+name!
+
+"
+transpose_39_perm_0*!
+val
+
+
+
+
+
+�’
+	transpose
+x
+
+x_15
+perm
+
+transpose_39_perm_00
+
transpose_101
+
+
+
+ 
+€*#
+name
+
+"
+
transpose_101’
+	transpose
+x
+
+x_23
+perm
+
+transpose_38_perm_00
+
transpose_102
+
+
+
+€
+ *#
+name
+
+"
+
transpose_102„
+matmul
+x
+
+
transpose_102
+y
+
+
transpose_1013
+transpose_x$
+"
+ attention_scores_5_transpose_x_03
+transpose_y$
+"
+ attention_scores_5_transpose_y_06
+attention_scores_5 
+
+
+
+€
+€*(
+name 
+
+"
+attention_scores_5„
+const(
+ _inversed_attention_scores_7_y_0
+*6
+name.
+&
+$""
+ _inversed_attention_scores_7_y_0*
+val
+
+
+
+
+ó5>Ã
+mul
+x
+
+attention_scores_5)
+y$
+"
+ _inversed_attention_scores_7_y_0@
+_inversed_attention_scores_7 
+
+
+
+€
+€*2
+name*
+"
+ "
+_inversed_attention_scores_7•
+add%
+x 
+
+_inversed_attention_scores_7
+y
+
+attention_mask_1,
+input_33 
+
+
+
+€
+€*
+name
+
+"
+
+input_33}
+softmax
+x
+
+
+input_33
+axis	
+
+var_8,
+input_35 
+
+
+
+€
+€*
+name
+
+"
+
+input_35{
+const%
+context_layer_5_transpose_x_0
+*3
+name+
+#
+!"
+context_layer_5_transpose_x_0*
+val
+
+
+�{
+const%
+context_layer_5_transpose_y_0
+*3
+name+
+#
+!"
+context_layer_5_transpose_y_0*
+val
+
+
+�†
+	transpose
+x
+
+x_19
+perm
+	
+var_1940
+
transpose_103
+
+
+
+€
+ *#
+name
+
+"
+
transpose_103ò
+matmul
+x
+
+
+input_35
+y
+
+
transpose_1030
+transpose_x!
+
+context_layer_5_transpose_x_00
+transpose_y!
+
+context_layer_5_transpose_y_02
+context_layer_5
+
+
+
+€
+ *%
+name
+
+"
+context_layer_5a
+const
+var_210
+
+
+*
+name
+
+
+"
+op_210*!
+val
+
+
+
+
+
+�b
+const
+var_215
+
+
+*
+name
+
+
+"
+op_215*"
+val
+
+
+
+	
+€€‘
+	transpose
+x
+
+context_layer_5
+perm
+	
+var_2100
+
transpose_100
+
+
+€
+
+ *#
+name
+
+"
+
transpose_100
+reshape
+x
+
+
transpose_100
+shape
+	
+var_215&
+input_37
+
+
+€
+€*
+name
+
+"
+
+input_37å
+linear
+x
+
+
+input_37A
+weight7
+5
+3model_encoder_layer_1_attention_output_dense_weight=
+bias5
+3
+1model_encoder_layer_1_attention_output_dense_bias&
+linear_9
+
+
+€
+€*
+name
+
+"
+
+linear_9s
+add
+x
+
+
+linear_9
+y
+
+
+input_31&
+input_41
+
+
+€
+€*
+name
+
+"
+
+input_41x
+const
+input_43_axes_0
+
+
+*%
+name
+
+"
+input_43_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ¤
+
+layer_norm
+x
+
+
+input_41
+axes
+
+input_43_axes_0D
+gamma;
+9
+7model_encoder_layer_1_attention_output_LayerNorm_weightA
+beta9
+7
+5model_encoder_layer_1_attention_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_43
+
+
+€
+€*
+name
+
+"
+
+input_43ß
+linear
+x
+
+
+input_43=
+weight3
+1
+/model_encoder_layer_1_intermediate_dense_weight9
+bias1
+/
+-model_encoder_layer_1_intermediate_dense_bias'
+	linear_10
+
+
+€
+€*
+name
+
+
"
+	linear_10c
+const
+input_47_mode_0
+*%
+name
+
+"
+input_47_mode_0*
+val
+
+	"
+EXACT
+gelu
+x
+
+	linear_10
+mode
+
+input_47_mode_0&
+input_47
+
+
+€
+€*
+name
+
+"
+
+input_47Ó
+linear
+x
+
+
+input_477
+weight-
++
+)model_encoder_layer_1_output_dense_weight3
+bias+
+)
+'model_encoder_layer_1_output_dense_bias'
+	linear_11
+
+
+€
+€*
+name
+
+
"
+	linear_11t
+add
+x
+
+	linear_11
+y
+
+
+input_43&
+input_51
+
+
+€
+€*
+name
+
+"
+
+input_51x
+const
+input_53_axes_0
+
+
+*%
+name
+
+"
+input_53_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ
+
+layer_norm
+x
+
+
+input_51
+axes
+
+input_53_axes_0:
+gamma1
+/
+-model_encoder_layer_1_output_LayerNorm_weight7
+beta/
+-
++model_encoder_layer_1_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_53
+
+
+€
+€*
+name
+
+"
+
+input_53ã
+linear
+x
+
+
+input_53?
+weight5
+3
+1model_encoder_layer_2_attention_self_query_weight;
+bias3
+1
+/model_encoder_layer_2_attention_self_query_bias'
+	linear_12
+
+
+€
+€*
+name
+
+
"
+	linear_12ß
+linear
+x
+
+
+input_53=
+weight3
+1
+/model_encoder_layer_2_attention_self_key_weight9
+bias1
+/
+-model_encoder_layer_2_attention_self_key_bias'
+	linear_13
+
+
+€
+€*
+name
+
+
"
+	linear_13b
+const
+var_260
+
+
+*
+name
+
+
+"
+op_260*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_13
+shape
+	
+var_260'
+x_27
+
+
+€
+
+ *
+name
+
+
+"
+x_27ã
+linear
+x
+
+
+input_53?
+weight5
+3
+1model_encoder_layer_2_attention_self_value_weight;
+bias3
+1
+/model_encoder_layer_2_attention_self_value_bias'
+	linear_14
+
+
+€
+€*
+name
+
+
"
+	linear_14b
+const
+var_269
+
+
+*
+name
+
+
+"
+op_269*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_14
+shape
+	
+var_269'
+x_31
+
+
+€
+
+ *
+name
+
+
+"
+x_31a
+const
+var_271
+
+
+*
+name
+
+
+"
+op_271*!
+val
+
+
+
+
+
+�b
+const
+var_275
+
+
+*
+name
+
+
+"
+op_275*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_12
+shape
+	
+var_275'
+x_35
+
+
+€
+
+ *
+name
+
+
+"
+x_35
+const(
+ attention_scores_9_transpose_x_0
+*6
+name.
+&
+$""
+ attention_scores_9_transpose_x_0*
+val
+
+
+�
+const(
+ attention_scores_9_transpose_y_0
+*6
+name.
+&
+$""
+ attention_scores_9_transpose_y_0*
+val
+
+
+�z
+const#
+transpose_40_perm_0
+
+
+*)
+name!
+
+"
+transpose_40_perm_0*!
+val
+
+
+
+
+
+�z
+const#
+transpose_41_perm_0
+
+
+*)
+name!
+
+"
+transpose_41_perm_0*!
+val
+
+
+
+
+
+�
+	transpose
+x
+
+x_27
+perm
+
+transpose_41_perm_0/
+transpose_97
+
+
+
+ 
+€*"
+name
+
+"
+transpose_97
+	transpose
+x
+
+x_35
+perm
+
+transpose_40_perm_0/
+transpose_98
+
+
+
+€
+ *"
+name
+
+"
+transpose_98‚
+matmul
+x
+
+transpose_98
+y
+
+transpose_973
+transpose_x$
+"
+ attention_scores_9_transpose_x_03
+transpose_y$
+"
+ attention_scores_9_transpose_y_06
+attention_scores_9 
+
+
+
+€
+€*(
+name 
+
+"
+attention_scores_9†
+const)
+!_inversed_attention_scores_11_y_0
+*7
+name/
+'
+%"#
+!_inversed_attention_scores_11_y_0*
+val
+
+
+
+
+ó5>Æ
+mul
+x
+
+attention_scores_9*
+y%
+#
+!_inversed_attention_scores_11_y_0A
+_inversed_attention_scores_11 
+
+
+
+€
+€*3
+name+
+#
+!"
+_inversed_attention_scores_11–
+add&
+x!
+
+_inversed_attention_scores_11
+y
+
+attention_mask_1,
+input_55 
+
+
+
+€
+€*
+name
+
+"
+
+input_55}
+softmax
+x
+
+
+input_55
+axis	
+
+var_8,
+input_57 
+
+
+
+€
+€*
+name
+
+"
+
+input_57{
+const%
+context_layer_9_transpose_x_0
+*3
+name+
+#
+!"
+context_layer_9_transpose_x_0*
+val
+
+
+�{
+const%
+context_layer_9_transpose_y_0
+*3
+name+
+#
+!"
+context_layer_9_transpose_y_0*
+val
+
+
+�„
+	transpose
+x
+
+x_31
+perm
+	
+var_271/
+transpose_99
+
+
+
+€
+ *"
+name
+
+"
+transpose_99ñ
+matmul
+x
+
+
+input_57
+y
+
+transpose_990
+transpose_x!
+
+context_layer_9_transpose_x_00
+transpose_y!
+
+context_layer_9_transpose_y_02
+context_layer_9
+
+
+
+€
+ *%
+name
+
+"
+context_layer_9a
+const
+var_287
+
+
+*
+name
+
+
+"
+op_287*!
+val
+
+
+
+
+
+�b
+const
+var_292
+
+
+*
+name
+
+
+"
+op_292*"
+val
+
+
+
+	
+€€
+	transpose
+x
+
+context_layer_9
+perm
+	
+var_287/
+transpose_96
+
+
+€
+
+ *"
+name
+
+"
+transpose_96~
+reshape
+x
+
+transpose_96
+shape
+	
+var_292&
+input_59
+
+
+€
+€*
+name
+
+"
+
+input_59ç
+linear
+x
+
+
+input_59A
+weight7
+5
+3model_encoder_layer_2_attention_output_dense_weight=
+bias5
+3
+1model_encoder_layer_2_attention_output_dense_bias'
+	linear_15
+
+
+€
+€*
+name
+
+
"
+	linear_15t
+add
+x
+
+	linear_15
+y
+
+
+input_53&
+input_63
+
+
+€
+€*
+name
+
+"
+
+input_63x
+const
+input_65_axes_0
+
+
+*%
+name
+
+"
+input_65_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ¤
+
+layer_norm
+x
+
+
+input_63
+axes
+
+input_65_axes_0D
+gamma;
+9
+7model_encoder_layer_2_attention_output_LayerNorm_weightA
+beta9
+7
+5model_encoder_layer_2_attention_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_65
+
+
+€
+€*
+name
+
+"
+
+input_65ß
+linear
+x
+
+
+input_65=
+weight3
+1
+/model_encoder_layer_2_intermediate_dense_weight9
+bias1
+/
+-model_encoder_layer_2_intermediate_dense_bias'
+	linear_16
+
+
+€
+€*
+name
+
+
"
+	linear_16c
+const
+input_69_mode_0
+*%
+name
+
+"
+input_69_mode_0*
+val
+
+	"
+EXACT
+gelu
+x
+
+	linear_16
+mode
+
+input_69_mode_0&
+input_69
+
+
+€
+€*
+name
+
+"
+
+input_69Ó
+linear
+x
+
+
+input_697
+weight-
++
+)model_encoder_layer_2_output_dense_weight3
+bias+
+)
+'model_encoder_layer_2_output_dense_bias'
+	linear_17
+
+
+€
+€*
+name
+
+
"
+	linear_17t
+add
+x
+
+	linear_17
+y
+
+
+input_65&
+input_73
+
+
+€
+€*
+name
+
+"
+
+input_73x
+const
+input_75_axes_0
+
+
+*%
+name
+
+"
+input_75_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ
+
+layer_norm
+x
+
+
+input_73
+axes
+
+input_75_axes_0:
+gamma1
+/
+-model_encoder_layer_2_output_LayerNorm_weight7
+beta/
+-
++model_encoder_layer_2_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_75
+
+
+€
+€*
+name
+
+"
+
+input_75ã
+linear
+x
+
+
+input_75?
+weight5
+3
+1model_encoder_layer_3_attention_self_query_weight;
+bias3
+1
+/model_encoder_layer_3_attention_self_query_bias'
+	linear_18
+
+
+€
+€*
+name
+
+
"
+	linear_18ß
+linear
+x
+
+
+input_75=
+weight3
+1
+/model_encoder_layer_3_attention_self_key_weight9
+bias1
+/
+-model_encoder_layer_3_attention_self_key_bias'
+	linear_19
+
+
+€
+€*
+name
+
+
"
+	linear_19b
+const
+var_337
+
+
+*
+name
+
+
+"
+op_337*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_19
+shape
+	
+var_337'
+x_39
+
+
+€
+
+ *
+name
+
+
+"
+x_39ã
+linear
+x
+
+
+input_75?
+weight5
+3
+1model_encoder_layer_3_attention_self_value_weight;
+bias3
+1
+/model_encoder_layer_3_attention_self_value_bias'
+	linear_20
+
+
+€
+€*
+name
+
+
"
+	linear_20b
+const
+var_346
+
+
+*
+name
+
+
+"
+op_346*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_20
+shape
+	
+var_346'
+x_43
+
+
+€
+
+ *
+name
+
+
+"
+x_43a
+const
+var_348
+
+
+*
+name
+
+
+"
+op_348*!
+val
+
+
+
+
+
+�b
+const
+var_352
+
+
+*
+name
+
+
+"
+op_352*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_18
+shape
+	
+var_352'
+x_47
+
+
+€
+
+ *
+name
+
+
+"
+x_47ƒ
+const)
+!attention_scores_13_transpose_x_0
+*7
+name/
+'
+%"#
+!attention_scores_13_transpose_x_0*
+val
+
+
+�ƒ
+const)
+!attention_scores_13_transpose_y_0
+*7
+name/
+'
+%"#
+!attention_scores_13_transpose_y_0*
+val
+
+
+�z
+const#
+transpose_42_perm_0
+
+
+*)
+name!
+
+"
+transpose_42_perm_0*!
+val
+
+
+
+
+
+�z
+const#
+transpose_43_perm_0
+
+
+*)
+name!
+
+"
+transpose_43_perm_0*!
+val
+
+
+
+
+
+�
+	transpose
+x
+
+x_39
+perm
+
+transpose_43_perm_0/
+transpose_93
+
+
+
+ 
+€*"
+name
+
+"
+transpose_93
+	transpose
+x
+
+x_47
+perm
+
+transpose_42_perm_0/
+transpose_94
+
+
+
+€
+ *"
+name
+
+"
+transpose_94†
+matmul
+x
+
+transpose_94
+y
+
+transpose_934
+transpose_x%
+#
+!attention_scores_13_transpose_x_04
+transpose_y%
+#
+!attention_scores_13_transpose_y_07
+attention_scores_13 
+
+
+
+€
+€*)
+name!
+
+"
+attention_scores_13†
+const)
+!_inversed_attention_scores_15_y_0
+*7
+name/
+'
+%"#
+!_inversed_attention_scores_15_y_0*
+val
+
+
+
+
+ó5>Ç
+mul
+x
+
+attention_scores_13*
+y%
+#
+!_inversed_attention_scores_15_y_0A
+_inversed_attention_scores_15 
+
+
+
+€
+€*3
+name+
+#
+!"
+_inversed_attention_scores_15–
+add&
+x!
+
+_inversed_attention_scores_15
+y
+
+attention_mask_1,
+input_77 
+
+
+
+€
+€*
+name
+
+"
+
+input_77}
+softmax
+x
+
+
+input_77
+axis	
+
+var_8,
+input_79 
+
+
+
+€
+€*
+name
+
+"
+
+input_79}
+const&
+context_layer_13_transpose_x_0
+*4
+name,
+$
+"" 
+context_layer_13_transpose_x_0*
+val
+
+
+�}
+const&
+context_layer_13_transpose_y_0
+*4
+name,
+$
+"" 
+context_layer_13_transpose_y_0*
+val
+
+
+�„
+	transpose
+x
+
+x_43
+perm
+	
+var_348/
+transpose_95
+
+
+
+€
+ *"
+name
+
+"
+transpose_95õ
+matmul
+x
+
+
+input_79
+y
+
+transpose_951
+transpose_x"
+ 
+context_layer_13_transpose_x_01
+transpose_y"
+ 
+context_layer_13_transpose_y_03
+context_layer_13
+
+
+
+€
+ *&
+name
+
+"
+context_layer_13a
+const
+var_364
+
+
+*
+name
+
+
+"
+op_364*!
+val
+
+
+
+
+
+�b
+const
+var_369
+
+
+*
+name
+
+
+"
+op_369*"
+val
+
+
+
+	
+€€
+	transpose
+x
+
+context_layer_13
+perm
+	
+var_364/
+transpose_92
+
+
+€
+
+ *"
+name
+
+"
+transpose_92~
+reshape
+x
+
+transpose_92
+shape
+	
+var_369&
+input_81
+
+
+€
+€*
+name
+
+"
+
+input_81ç
+linear
+x
+
+
+input_81A
+weight7
+5
+3model_encoder_layer_3_attention_output_dense_weight=
+bias5
+3
+1model_encoder_layer_3_attention_output_dense_bias'
+	linear_21
+
+
+€
+€*
+name
+
+
"
+	linear_21t
+add
+x
+
+	linear_21
+y
+
+
+input_75&
+input_85
+
+
+€
+€*
+name
+
+"
+
+input_85x
+const
+input_87_axes_0
+
+
+*%
+name
+
+"
+input_87_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ¤
+
+layer_norm
+x
+
+
+input_85
+axes
+
+input_87_axes_0D
+gamma;
+9
+7model_encoder_layer_3_attention_output_LayerNorm_weightA
+beta9
+7
+5model_encoder_layer_3_attention_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_87
+
+
+€
+€*
+name
+
+"
+
+input_87ß
+linear
+x
+
+
+input_87=
+weight3
+1
+/model_encoder_layer_3_intermediate_dense_weight9
+bias1
+/
+-model_encoder_layer_3_intermediate_dense_bias'
+	linear_22
+
+
+€
+€*
+name
+
+
"
+	linear_22c
+const
+input_91_mode_0
+*%
+name
+
+"
+input_91_mode_0*
+val
+
+	"
+EXACT
+gelu
+x
+
+	linear_22
+mode
+
+input_91_mode_0&
+input_91
+
+
+€
+€*
+name
+
+"
+
+input_91Ó
+linear
+x
+
+
+input_917
+weight-
++
+)model_encoder_layer_3_output_dense_weight3
+bias+
+)
+'model_encoder_layer_3_output_dense_bias'
+	linear_23
+
+
+€
+€*
+name
+
+
"
+	linear_23t
+add
+x
+
+	linear_23
+y
+
+
+input_87&
+input_95
+
+
+€
+€*
+name
+
+"
+
+input_95x
+const
+input_97_axes_0
+
+
+*%
+name
+
+"
+input_97_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ
+
+layer_norm
+x
+
+
+input_95
+axes
+
+input_97_axes_0:
+gamma1
+/
+-model_encoder_layer_3_output_LayerNorm_weight7
+beta/
+-
++model_encoder_layer_3_output_LayerNorm_bias
+epsilon
+
+
+var_10&
+input_97
+
+
+€
+€*
+name
+
+"
+
+input_97ã
+linear
+x
+
+
+input_97?
+weight5
+3
+1model_encoder_layer_4_attention_self_query_weight;
+bias3
+1
+/model_encoder_layer_4_attention_self_query_bias'
+	linear_24
+
+
+€
+€*
+name
+
+
"
+	linear_24ß
+linear
+x
+
+
+input_97=
+weight3
+1
+/model_encoder_layer_4_attention_self_key_weight9
+bias1
+/
+-model_encoder_layer_4_attention_self_key_bias'
+	linear_25
+
+
+€
+€*
+name
+
+
"
+	linear_25b
+const
+var_414
+
+
+*
+name
+
+
+"
+op_414*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_25
+shape
+	
+var_414'
+x_51
+
+
+€
+
+ *
+name
+
+
+"
+x_51ã
+linear
+x
+
+
+input_97?
+weight5
+3
+1model_encoder_layer_4_attention_self_value_weight;
+bias3
+1
+/model_encoder_layer_4_attention_self_value_bias'
+	linear_26
+
+
+€
+€*
+name
+
+
"
+	linear_26b
+const
+var_423
+
+
+*
+name
+
+
+"
+op_423*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_26
+shape
+	
+var_423'
+x_55
+
+
+€
+
+ *
+name
+
+
+"
+x_55a
+const
+var_425
+
+
+*
+name
+
+
+"
+op_425*!
+val
+
+
+
+
+
+�b
+const
+var_429
+
+
+*
+name
+
+
+"
+op_429*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_24
+shape
+	
+var_429'
+x_59
+
+
+€
+
+ *
+name
+
+
+"
+x_59ƒ
+const)
+!attention_scores_17_transpose_x_0
+*7
+name/
+'
+%"#
+!attention_scores_17_transpose_x_0*
+val
+
+
+�ƒ
+const)
+!attention_scores_17_transpose_y_0
+*7
+name/
+'
+%"#
+!attention_scores_17_transpose_y_0*
+val
+
+
+�z
+const#
+transpose_44_perm_0
+
+
+*)
+name!
+
+"
+transpose_44_perm_0*!
+val
+
+
+
+
+
+�z
+const#
+transpose_45_perm_0
+
+
+*)
+name!
+
+"
+transpose_45_perm_0*!
+val
+
+
+
+
+
+�
+	transpose
+x
+
+x_51
+perm
+
+transpose_45_perm_0/
+transpose_89
+
+
+
+ 
+€*"
+name
+
+"
+transpose_89
+	transpose
+x
+
+x_59
+perm
+
+transpose_44_perm_0/
+transpose_90
+
+
+
+€
+ *"
+name
+
+"
+transpose_90†
+matmul
+x
+
+transpose_90
+y
+
+transpose_894
+transpose_x%
+#
+!attention_scores_17_transpose_x_04
+transpose_y%
+#
+!attention_scores_17_transpose_y_07
+attention_scores_17 
+
+
+
+€
+€*)
+name!
+
+"
+attention_scores_17†
+const)
+!_inversed_attention_scores_19_y_0
+*7
+name/
+'
+%"#
+!_inversed_attention_scores_19_y_0*
+val
+
+
+
+
+ó5>Ç
+mul
+x
+
+attention_scores_17*
+y%
+#
+!_inversed_attention_scores_19_y_0A
+_inversed_attention_scores_19 
+
+
+
+€
+€*3
+name+
+#
+!"
+_inversed_attention_scores_19–
+add&
+x!
+
+_inversed_attention_scores_19
+y
+
+attention_mask_1,
+input_99 
+
+
+
+€
+€*
+name
+
+"
+
+input_99
+softmax
+x
+
+
+input_99
+axis	
+
+var_8-
+	input_101 
+
+
+
+€
+€*
+name
+
+
"
+	input_101}
+const&
+context_layer_17_transpose_x_0
+*4
+name,
+$
+"" 
+context_layer_17_transpose_x_0*
+val
+
+
+�}
+const&
+context_layer_17_transpose_y_0
+*4
+name,
+$
+"" 
+context_layer_17_transpose_y_0*
+val
+
+
+�„
+	transpose
+x
+
+x_55
+perm
+	
+var_425/
+transpose_91
+
+
+
+€
+ *"
+name
+
+"
+transpose_91ö
+matmul
+x
+
+	input_101
+y
+
+transpose_911
+transpose_x"
+ 
+context_layer_17_transpose_x_01
+transpose_y"
+ 
+context_layer_17_transpose_y_03
+context_layer_17
+
+
+
+€
+ *&
+name
+
+"
+context_layer_17a
+const
+var_441
+
+
+*
+name
+
+
+"
+op_441*!
+val
+
+
+
+
+
+�b
+const
+var_446
+
+
+*
+name
+
+
+"
+op_446*"
+val
+
+
+
+	
+€€
+	transpose
+x
+
+context_layer_17
+perm
+	
+var_441/
+transpose_88
+
+
+€
+
+ *"
+name
+
+"
+transpose_88€
+reshape
+x
+
+transpose_88
+shape
+	
+var_446'
+	input_103
+
+
+€
+€*
+name
+
+
"
+	input_103è
+linear
+x
+
+	input_103A
+weight7
+5
+3model_encoder_layer_4_attention_output_dense_weight=
+bias5
+3
+1model_encoder_layer_4_attention_output_dense_bias'
+	linear_27
+
+
+€
+€*
+name
+
+
"
+	linear_27v
+add
+x
+
+	linear_27
+y
+
+
+input_97'
+	input_107
+
+
+€
+€*
+name
+
+
"
+	input_107z
+const 
+input_109_axes_0
+
+
+*&
+name
+
+"
+input_109_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ¨
+
+layer_norm
+x
+
+	input_107
+axes
+
+input_109_axes_0D
+gamma;
+9
+7model_encoder_layer_4_attention_output_LayerNorm_weightA
+beta9
+7
+5model_encoder_layer_4_attention_output_LayerNorm_bias
+epsilon
+
+
+var_10'
+	input_109
+
+
+€
+€*
+name
+
+
"
+	input_109à
+linear
+x
+
+	input_109=
+weight3
+1
+/model_encoder_layer_4_intermediate_dense_weight9
+bias1
+/
+-model_encoder_layer_4_intermediate_dense_bias'
+	linear_28
+
+
+€
+€*
+name
+
+
"
+	linear_28e
+const
+input_113_mode_0
+*&
+name
+
+"
+input_113_mode_0*
+val
+
+	"
+EXACT‚
+gelu
+x
+
+	linear_28
+mode
+
+input_113_mode_0'
+	input_113
+
+
+€
+€*
+name
+
+
"
+	input_113Ô
+linear
+x
+
+	input_1137
+weight-
++
+)model_encoder_layer_4_output_dense_weight3
+bias+
+)
+'model_encoder_layer_4_output_dense_bias'
+	linear_29
+
+
+€
+€*
+name
+
+
"
+	linear_29w
+add
+x
+
+	linear_29
+y
+
+	input_109'
+	input_117
+
+
+€
+€*
+name
+
+
"
+	input_117z
+const 
+input_119_axes_0
+
+
+*&
+name
+
+"
+input_119_axes_0*'
+val 
+
+
+
+
+
+ÿÿÿÿÿÿÿÿÿ”
+
+layer_norm
+x
+
+	input_117
+axes
+
+input_119_axes_0:
+gamma1
+/
+-model_encoder_layer_4_output_LayerNorm_weight7
+beta/
+-
++model_encoder_layer_4_output_LayerNorm_bias
+epsilon
+
+
+var_10'
+	input_119
+
+
+€
+€*
+name
+
+
"
+	input_119ä
+linear
+x
+
+	input_119?
+weight5
+3
+1model_encoder_layer_5_attention_self_query_weight;
+bias3
+1
+/model_encoder_layer_5_attention_self_query_bias'
+	linear_30
+
+
+€
+€*
+name
+
+
"
+	linear_30à
+linear
+x
+
+	input_119=
+weight3
+1
+/model_encoder_layer_5_attention_self_key_weight9
+bias1
+/
+-model_encoder_layer_5_attention_self_key_bias'
+	linear_31
+
+
+€
+€*
+name
+
+
"
+	linear_31b
+const
+var_491
+
+
+*
+name
+
+
+"
+op_491*"
+val
+
+
+
+	
+€ x
+reshape
+x
+
+	linear_31
+shape
+	
+var_491'
+x_63
+
+
+€
+
+ *
+name
+
+
+"
+x_63ä
+linear
+x
+
+	input_119?
+weight5
+3
+1model_encoder_layer_5_attention_self_value_weight;
+bias3
+1
+[… serialized Core ML MIL program (the binary model specification rendered as text) omitted. Encoder layers 5–11 repeat the same block: query/key/value linear ops, reshape/transpose, matmul, scale, attention-mask add, softmax, context matmul, attention output dense with residual add and layer_norm, then intermediate dense + gelu and output dense with residual add and layer_norm. The program ends with the final LayerNorm producing last_hidden_state, a slice_by_index over the first token, and the pooler dense + tanh producing pooler_output. buildInfo: coremltools-version 7.1, coremltools-component-torch 2.1.0, coremltools-source-dialect TorchScript. …]
\ No newline at end of file
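For orientation, the attention arithmetic encoded by the ops summarized above (scale, mask add, softmax, context matmul) is ordinary scaled dot-product attention. This is the textbook formulation rather than anything quoted from the file, with M the additive mask fed in as attention_mask_1 and d_k the per-head width implied by the reshape/transpose ops:

    \mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}} + M\right)V

Each attention block and each feed-forward block (intermediate dense, gelu, output dense) is then followed by a residual add and a layer_norm, which is what the recurring add / layer_norm ops in the program implement.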
diff --git a/Sources/SwiftNLP/Resources/float32_model.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/Sources/SwiftNLP/Resources/float32_model.mlpackage/Data/com.apple.CoreML/weights/weight.bin
new file mode 100644
index 0000000000000000000000000000000000000000..71d86e38eb7ae4707228b8af0888c01ab9b7c4a5
Binary files /dev/null and b/Sources/SwiftNLP/Resources/float32_model.mlpackage/Data/com.apple.CoreML/weights/weight.bin differ
diff --git a/Sources/SwiftNLP/Resources/float32_model.mlpackage/Manifest.json b/Sources/SwiftNLP/Resources/float32_model.mlpackage/Manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..2eebcce458da7726c52b94969188388ac7f645b7
--- /dev/null
+++ b/Sources/SwiftNLP/Resources/float32_model.mlpackage/Manifest.json
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "63B0B880-B145-44C1-9871-506E9D0C9935": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        },
+        "98B220AA-0669-4838-B6C4-38F80D1282C2": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        }
+    },
+    "rootModelIdentifier": "63B0B880-B145-44C1-9871-506E9D0C9935"
+}
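The manifest only maps the two items inside the new .mlpackage (the model specification and its weights). As a minimal sketch, assuming the package is declared as a resource of the SwiftNLP target so Bundle.module can see it, the uncompiled package could be compiled and loaded at runtime as shown below; the generated wrapper classes further down instead expect a precompiled *.mlmodelc in the bundle, and loadFloat32Model is a hypothetical helper name.

import CoreML
import Foundation

// Sketch: compile the bundled .mlpackage to an .mlmodelc and load it.
// Assumes "float32_model.mlpackage" is reachable via Bundle.module.
func loadFloat32Model() throws -> MLModel {
    guard let packageURL = Bundle.module.url(forResource: "float32_model",
                                             withExtension: "mlpackage") else {
        throw CocoaError(.fileNoSuchFile)
    }
    let compiledURL = try MLModel.compileModel(at: packageURL) // writes a temporary .mlmodelc
    return try MLModel(contentsOf: compiledURL)
}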
diff --git a/Sources/SwiftNLPGenericLLMMacros/Macros.swift b/Sources/SwiftNLPGenericLLMMacros/Macros.swift
new file mode 100644
index 0000000000000000000000000000000000000000..189c32d5b13ba20994f8eec2c75f832729e7e55f
--- /dev/null
+++ b/Sources/SwiftNLPGenericLLMMacros/Macros.swift
@@ -0,0 +1,107 @@
+import CoreML
+import SwiftSyntax
+import SwiftSyntaxMacros
+
+
+@available(macOS 12, iOS 15.0, tvOS 17.0, watchOS 10.0, *)
+public struct LLMModelPredictionCases: ExpressionMacro {
+    //        Example expansion:
+    //        {
+    //            switch self.model {
+    //            case "all_MiniLM_L6_v2":
+    //                let input_class = all_MiniLM_L6_v2Input(input_ids: inputIds, attention_mask: attentionMask);
+    //                output = try! LLMModel.prediction(input: input_class).featureValue(for: "embeddings")!.multiArrayValue!;
+    //            case "float32_model":
+    //                let input_class = float32_modelInput(input_ids: inputIds, attention_mask: attentionMask);
+    //                output = try! LLMModel.prediction(input: input_class).featureValue(for: "pooler_output")!.multiArrayValue!;
+    //            default:
+    //                output = nil;
+    //            }
+    //        }();
+    
+    public static func expansion(
+        of node: some FreestandingMacroExpansionSyntax,
+        in context: some MacroExpansionContext
+    ) throws -> ExprSyntax {
+        guard let arg = node.argumentList.first?.expression,
+            let segments = arg.as(StringLiteralExprSyntax.self)?.segments,
+            segments.count == 1,
+            case .stringSegment(let literalSegment)? = segments.first
+        else {
+            fatalError("Bad argument to macro.")
+        }
+        
+        let model_key = literalSegment.content.text
+        
+        var macro = "{ switch self.model { "
+        
+        for (k, v) in LLM_MODEL_CLASSES {
+            let model_class = v[LLMModelClassesKey.Input]!
+            let model_feature = v[LLMModelClassesKey.FeatureName]!
+            macro +=
+                """
+                case \"\(k)\":
+                    let input_class = \(model_class)(\(model_key));
+                    output = try! LLMModel.prediction(input: input_class).featureValue(for: \"\(model_feature)\")!.multiArrayValue!;
+                """
+        }
+        
+        macro += "default: output = nil; } }();"
+        
+        return ExprSyntax(stringLiteral: macro)
+            
+    }
+}
+
+
+@available(macOS 12, iOS 15.0, tvOS 17.0, watchOS 10.0, *)
+public enum LLMPredictionFunctions: DeclarationMacro {
+    public static func expansion(
+        of node: some FreestandingMacroExpansionSyntax,
+        in context: some MacroExpansionContext
+    ) throws -> [DeclSyntax] {
+    
+        guard let arg = node.argumentList.first?.expression,
+            let segments = arg.as(StringLiteralExprSyntax.self)?.segments,
+            segments.count == 1,
+            case .stringSegment(let literalSegment)? = segments.first
+        else {
+            fatalError("Bad argument to macro.")
+        }
+        
+        let model_key = literalSegment.content.text
+
+        let model_type_name = LLM_MODEL_CLASSES[model_key]![LLMModelClassesKey.Model]!;
+        let model_input_name = LLM_MODEL_CLASSES[model_key]![LLMModelClassesKey.Input]!;
+        let model_output_name = LLM_MODEL_CLASSES[model_key]![LLMModelClassesKey.Output]!;
+
+    
+        return [
+            """
+            public static func prediction(input: \(raw: model_input_name)) throws -> \(raw: model_output_name) {
+                let model = try \(raw: model_type_name)();
+                return try model.prediction(input: input, options: MLPredictionOptions())
+            }
+
+            public static func prediction(input: \(raw: model_input_name), options: MLPredictionOptions) throws -> \(raw: model_output_name) {
+                let model = try \(raw: model_type_name)();
+                let outFeatures: MLFeatureProvider = try model.prediction(input: input, options:options)
+                return \(raw: model_output_name)(features: outFeatures)
+            }
+
+            @available(macOS 13.6, iOS 17.0, tvOS 17.0, watchOS 10.0, *)
+            public static func prediction(input: \(raw: model_input_name), options: MLPredictionOptions = MLPredictionOptions()) async throws -> \(raw: model_output_name) {
+                let model = try \(raw: model_type_name)();
+                let outFeatures: MLFeatureProvider? = try await model.prediction(input: input, options:options)
+                return \(raw: model_output_name)(features: outFeatures!)
+            }
+
+            public static func predictions(inputs: [\(raw: model_input_name)], options: MLPredictionOptions = MLPredictionOptions()) throws -> [\(raw: model_output_name)] {
+                let model = try \(raw: model_type_name)();
+                let res = try model.predictions(inputs: inputs, options: options);
+                return res;
+            }
+            """
+        ]
+    }
+}
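To make the string template in LLMPredictionFunctions concrete, this is roughly what it expands to for the "float32_model" key, hand-substituted from the LLM_MODEL_CLASSES entries defined in ModelClasses.swift below (only the first two overloads are shown; the type that hosts these static methods is whichever declaration the macro is attached to and is not part of this sketch):

public static func prediction(input: float32_modelInput) throws -> float32_modelOutput {
    let model = try float32_model();
    return try model.prediction(input: input, options: MLPredictionOptions())
}

public static func prediction(input: float32_modelInput, options: MLPredictionOptions) throws -> float32_modelOutput {
    let model = try float32_model();
    let outFeatures: MLFeatureProvider = try model.prediction(input: input, options: options)
    return float32_modelOutput(features: outFeatures)
}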
diff --git a/Sources/SwiftNLPGenericLLMMacros/Main.swift b/Sources/SwiftNLPGenericLLMMacros/Main.swift
new file mode 100644
index 0000000000000000000000000000000000000000..a4618aa221b937eb1d15cdc212debe19c6a535b1
--- /dev/null
+++ b/Sources/SwiftNLPGenericLLMMacros/Main.swift
@@ -0,0 +1,11 @@
+import SwiftCompilerPlugin
+import SwiftSyntaxMacros
+
+@main
+struct SwiftNLPGenericLLMMacros: CompilerPlugin {
+    init() {}
+    var providingMacros: [SwiftSyntaxMacros.Macro.Type] = [
+        LLMPredictionFunctions.self,
+        LLMModelPredictionCases.self
+    ]
+}
diff --git a/Sources/SwiftNLPGenericLLMMacros/ModelClasses.swift b/Sources/SwiftNLPGenericLLMMacros/ModelClasses.swift
new file mode 100644
index 0000000000000000000000000000000000000000..94839a63b7f2ec171865b2c3593915f697fe209b
--- /dev/null
+++ b/Sources/SwiftNLPGenericLLMMacros/ModelClasses.swift
@@ -0,0 +1,21 @@
+enum LLMModelClassesKey {
+    case Input
+    case Output
+    case Model
+    case FeatureName
+}
+
+let LLM_MODEL_CLASSES: [String: [LLMModelClassesKey: Any]] = [
+    "all_MiniLM_L6_v2": [
+        LLMModelClassesKey.Input: all_MiniLM_L6_v2Input.self,
+        LLMModelClassesKey.Output: all_MiniLM_L6_v2Output.self,
+        LLMModelClassesKey.Model: all_MiniLM_L6_v2.self,
+        LLMModelClassesKey.FeatureName: "embeddings"
+    ],
+    "float32_model": [
+        LLMModelClassesKey.Input: float32_modelInput.self,
+        LLMModelClassesKey.Output: float32_modelOutput.self,
+        LLMModelClassesKey.Model: float32_model.self,
+        LLMModelClassesKey.FeatureName: "pooler_output"
+    ]
+]
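The macros read this table at expansion time; because the Input/Output/Model values are metatypes stored as Any, interpolating them into generated source (as Macros.swift does with \(model_class)) yields the bare class name. A small sketch of that lookup, assuming it runs inside the macro target where these generated classes are visible:

// Sketch of the lookups Macros.swift performs against LLM_MODEL_CLASSES.
let key = "all_MiniLM_L6_v2"
if let entry = LLM_MODEL_CLASSES[key],
   let featureName = entry[LLMModelClassesKey.FeatureName] as? String,
   let inputType = entry[LLMModelClassesKey.Input] {
    // Interpolating the metatype prints e.g. "all_MiniLM_L6_v2Input".
    print("case \"\(key)\": input \(inputType), feature \"\(featureName)\"")
}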
diff --git a/Sources/SwiftNLPGenericLLMMacros/all-MiniLM-L6-v2.swift b/Sources/SwiftNLPGenericLLMMacros/all-MiniLM-L6-v2.swift
new file mode 100644
index 0000000000000000000000000000000000000000..c2f0c441add7ef7602ae99c4a202576c29faa97b
--- /dev/null
+++ b/Sources/SwiftNLPGenericLLMMacros/all-MiniLM-L6-v2.swift
@@ -0,0 +1,306 @@
+//
+// all_MiniLM_L6_v2.swift
+//
+// This file was automatically generated and should not be edited.
+//
+
+import CoreML
+
+
+/// Model Prediction Input Type
+@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
+class all_MiniLM_L6_v2Input : MLFeatureProvider {
+
+    /// input_ids as 1 by 512 matrix of floats
+    var input_ids: MLMultiArray
+
+    /// attention_mask as 1 by 512 matrix of floats
+    var attention_mask: MLMultiArray
+
+    var featureNames: Set<String> {
+        get {
+            return ["input_ids", "attention_mask"]
+        }
+    }
+    
+    func featureValue(for featureName: String) -> MLFeatureValue? {
+        if (featureName == "input_ids") {
+            return MLFeatureValue(multiArray: input_ids)
+        }
+        if (featureName == "attention_mask") {
+            return MLFeatureValue(multiArray: attention_mask)
+        }
+        return nil
+    }
+    
+    init(input_ids: MLMultiArray, attention_mask: MLMultiArray) {
+        self.input_ids = input_ids
+        self.attention_mask = attention_mask
+    }
+
+    convenience init(input_ids: MLShapedArray<Float>, attention_mask: MLShapedArray<Float>) {
+        self.init(input_ids: MLMultiArray(input_ids), attention_mask: MLMultiArray(attention_mask))
+    }
+
+}
+
+
+/// Model Prediction Output Type
+@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
+class all_MiniLM_L6_v2Output : MLFeatureProvider {
+
+    /// Source provided by CoreML
+    private let provider : MLFeatureProvider
+
+    /// embeddings as multidimensional array of floats
+    var embeddings: MLMultiArray {
+        return self.provider.featureValue(for: "embeddings")!.multiArrayValue!
+    }
+
+    /// embeddings as multidimensional array of floats
+    var embeddingsShapedArray: MLShapedArray<Float> {
+        return MLShapedArray<Float>(self.embeddings)
+    }
+
+    var featureNames: Set<String> {
+        return self.provider.featureNames
+    }
+    
+    func featureValue(for featureName: String) -> MLFeatureValue? {
+        return self.provider.featureValue(for: featureName)
+    }
+
+    init(embeddings: MLMultiArray) {
+        self.provider = try! MLDictionaryFeatureProvider(dictionary: ["embeddings" : MLFeatureValue(multiArray: embeddings)])
+    }
+
+    init(features: MLFeatureProvider) {
+        self.provider = features
+    }
+}
+
+
+/// Class for model loading and prediction
+@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
+class all_MiniLM_L6_v2 {
+    let model: MLModel
+
+    /// URL of model assuming it was installed in the same bundle as this class
+    class var urlOfModelInThisBundle : URL {
+        let bundle = Bundle(for: self)
+        return bundle.url(forResource: "all-MiniLM-L6-v2", withExtension:"mlmodelc")!
+    }
+
+    /**
+        Construct all_MiniLM_L6_v2 instance with an existing MLModel object.
+
+        Usually the application does not use this initializer unless it makes a subclass of all_MiniLM_L6_v2.
+        Such application may want to use `MLModel(contentsOfURL:configuration:)` and `all_MiniLM_L6_v2.urlOfModelInThisBundle` to create a MLModel object to pass-in.
+
+        - parameters:
+          - model: MLModel object
+    */
+    init(model: MLModel) {
+        self.model = model
+    }
+
+    /**
+        Construct a model with configuration
+
+        - parameters:
+           - configuration: the desired model configuration
+
+        - throws: an NSError object that describes the problem
+    */
+    convenience init(configuration: MLModelConfiguration = MLModelConfiguration()) throws {
+        try self.init(contentsOf: type(of:self).urlOfModelInThisBundle, configuration: configuration)
+    }
+
+    /**
+        Construct all_MiniLM_L6_v2 instance with explicit path to mlmodelc file
+        - parameters:
+           - modelURL: the file url of the model
+
+        - throws: an NSError object that describes the problem
+    */
+    convenience init(contentsOf modelURL: URL) throws {
+        try self.init(model: MLModel(contentsOf: modelURL))
+    }
+
+    /**
+        Construct a model with URL of the .mlmodelc directory and configuration
+
+        - parameters:
+           - modelURL: the file url of the model
+           - configuration: the desired model configuration
+
+        - throws: an NSError object that describes the problem
+    */
+    convenience init(contentsOf modelURL: URL, configuration: MLModelConfiguration) throws {
+        try self.init(model: MLModel(contentsOf: modelURL, configuration: configuration))
+    }
+
+    /**
+        Construct all_MiniLM_L6_v2 instance asynchronously with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - configuration: the desired model configuration
+          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
+    */
+    class func load(configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<all_MiniLM_L6_v2, Error>) -> Void) {
+        return self.load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration, completionHandler: handler)
+    }
+
+    /**
+        Construct all_MiniLM_L6_v2 instance asynchronously with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - configuration: the desired model configuration
+    */
+    class func load(configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> all_MiniLM_L6_v2 {
+        return try await self.load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration)
+    }
+
+    /**
+        Construct all_MiniLM_L6_v2 instance asynchronously with URL of the .mlmodelc directory with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - modelURL: the URL to the model
+          - configuration: the desired model configuration
+          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
+    */
+    class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<all_MiniLM_L6_v2, Error>) -> Void) {
+        MLModel.load(contentsOf: modelURL, configuration: configuration) { result in
+            switch result {
+            case .failure(let error):
+                handler(.failure(error))
+            case .success(let model):
+                handler(.success(all_MiniLM_L6_v2(model: model)))
+            }
+        }
+    }
+
+    /**
+        Construct all_MiniLM_L6_v2 instance asynchronously with URL of the .mlmodelc directory with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - modelURL: the URL to the model
+          - configuration: the desired model configuration
+    */
+    class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> all_MiniLM_L6_v2 {
+        let model = try await MLModel.load(contentsOf: modelURL, configuration: configuration)
+        return all_MiniLM_L6_v2(model: model)
+    }
+
+    /**
+        Make a prediction using the structured interface
+
+        - parameters:
+           - input: the input to the prediction as all_MiniLM_L6_v2Input
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as all_MiniLM_L6_v2Output
+    */
+    func prediction(input: all_MiniLM_L6_v2Input) throws -> all_MiniLM_L6_v2Output {
+        return try self.prediction(input: input, options: MLPredictionOptions())
+    }
+
+    /**
+        Make a prediction using the structured interface
+
+        - parameters:
+           - input: the input to the prediction as all_MiniLM_L6_v2Input
+           - options: prediction options 
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as all_MiniLM_L6_v2Output
+    */
+    func prediction(input: all_MiniLM_L6_v2Input, options: MLPredictionOptions) throws -> all_MiniLM_L6_v2Output {
+        let outFeatures = try model.prediction(from: input, options:options)
+        return all_MiniLM_L6_v2Output(features: outFeatures)
+    }
+
+    /**
+        Make an asynchronous prediction using the structured interface
+
+        - parameters:
+           - input: the input to the prediction as all_MiniLM_L6_v2Input
+           - options: prediction options 
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as all_MiniLM_L6_v2Output
+    */
+    @available(macOS 14.0, iOS 17.0, tvOS 17.0, watchOS 10.0, *)
+    func prediction(input: all_MiniLM_L6_v2Input, options: MLPredictionOptions = MLPredictionOptions()) async throws -> all_MiniLM_L6_v2Output {
+        let outFeatures = try await model.prediction(from: input, options:options)
+        return all_MiniLM_L6_v2Output(features: outFeatures)
+    }
+
+    /**
+        Make a prediction using the convenience interface
+
+        - parameters:
+            - input_ids as 1 by 512 matrix of floats
+            - attention_mask as 1 by 512 matrix of floats
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as all_MiniLM_L6_v2Output
+    */
+    func prediction(input_ids: MLMultiArray, attention_mask: MLMultiArray) throws -> all_MiniLM_L6_v2Output {
+        let input_ = all_MiniLM_L6_v2Input(input_ids: input_ids, attention_mask: attention_mask)
+        return try self.prediction(input: input_)
+    }
+
+    /**
+        Make a prediction using the convenience interface
+
+        - parameters:
+            - input_ids as 1 by 512 matrix of floats
+            - attention_mask as 1 by 512 matrix of floats
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as all_MiniLM_L6_v2Output
+    */
+
+    func prediction(input_ids: MLShapedArray<Float>, attention_mask: MLShapedArray<Float>) throws -> all_MiniLM_L6_v2Output {
+        let input_ = all_MiniLM_L6_v2Input(input_ids: input_ids, attention_mask: attention_mask)
+        return try self.prediction(input: input_)
+    }
+
+    /**
+        Make a batch prediction using the structured interface
+
+        - parameters:
+           - inputs: the inputs to the prediction as [all_MiniLM_L6_v2Input]
+           - options: prediction options 
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as [all_MiniLM_L6_v2Output]
+    */
+    func predictions(inputs: [all_MiniLM_L6_v2Input], options: MLPredictionOptions = MLPredictionOptions()) throws -> [all_MiniLM_L6_v2Output] {
+        let batchIn = MLArrayBatchProvider(array: inputs)
+        let batchOut = try model.predictions(from: batchIn, options: options)
+        var results : [all_MiniLM_L6_v2Output] = []
+        results.reserveCapacity(inputs.count)
+        for i in 0..<batchOut.count {
+            let outProvider = batchOut.features(at: i)
+            let result =  all_MiniLM_L6_v2Output(features: outProvider)
+            results.append(result)
+        }
+        return results
+    }
+}
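A short usage sketch of the generated convenience API, assuming the compiled all-MiniLM-L6-v2.mlmodelc is present in the bundle; the zero-filled arrays stand in for the token ids and attention mask a tokenizer would produce, and exampleMiniLMEmbedding is a hypothetical helper name:

import CoreML

@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
func exampleMiniLMEmbedding() throws -> MLMultiArray {
    // 1 x 512 inputs, matching the generated input description.
    let inputIds = try MLMultiArray(shape: [1, 512], dataType: .float32)
    let attentionMask = try MLMultiArray(shape: [1, 512], dataType: .float32)
    for i in 0..<512 {          // MLMultiArray contents are not zero-initialized
        inputIds[i] = 0         // real token ids would go here
        attentionMask[i] = 0    // 1 for real tokens, 0 for padding
    }
    let model = try all_MiniLM_L6_v2()
    let output = try model.prediction(input_ids: inputIds, attention_mask: attentionMask)
    return output.embeddings
}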
diff --git a/Sources/SwiftNLPGenericLLMMacros/float32_model.swift b/Sources/SwiftNLPGenericLLMMacros/float32_model.swift
new file mode 100644
index 0000000000000000000000000000000000000000..895c2fdb7ba5c7e9a7e2901daa59205de3bbded4
--- /dev/null
+++ b/Sources/SwiftNLPGenericLLMMacros/float32_model.swift
@@ -0,0 +1,316 @@
+//
+// float32_model.swift
+//
+// This file was automatically generated and should not be edited.
+//
+
+import CoreML
+
+
+/// Model Prediction Input Type
+@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
+class float32_modelInput : MLFeatureProvider {
+
+    /// Indices of input sequence tokens in the vocabulary as 1 by 128 matrix of 32-bit integers
+    var input_ids: MLMultiArray
+
+    /// Mask to avoid performing attention on padding token indices (1 = not masked, 0 = masked) as 1 by 128 matrix of 32-bit integers
+    var attention_mask: MLMultiArray
+
+    var featureNames: Set<String> {
+        get {
+            return ["input_ids", "attention_mask"]
+        }
+    }
+    
+    func featureValue(for featureName: String) -> MLFeatureValue? {
+        if (featureName == "input_ids") {
+            return MLFeatureValue(multiArray: input_ids)
+        }
+        if (featureName == "attention_mask") {
+            return MLFeatureValue(multiArray: attention_mask)
+        }
+        return nil
+    }
+    
+    init(input_ids: MLMultiArray, attention_mask: MLMultiArray) {
+        self.input_ids = input_ids
+        self.attention_mask = attention_mask
+    }
+
+    convenience init(input_ids: MLShapedArray<Int32>, attention_mask: MLShapedArray<Int32>) {
+        self.init(input_ids: MLMultiArray(input_ids), attention_mask: MLMultiArray(attention_mask))
+    }
+
+}
+
+
+/// Model Prediction Output Type
+@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
+class float32_modelOutput : MLFeatureProvider {
+
+    /// Source provided by CoreML
+    private let provider : MLFeatureProvider
+
+    /// Sequence of hidden-states at the output of the last layer of the model as 1 × 128 × 384 3-dimensional array of floats
+    var last_hidden_state: MLMultiArray {
+        return self.provider.featureValue(for: "last_hidden_state")!.multiArrayValue!
+    }
+
+    /// Sequence of hidden-states at the output of the last layer of the model as 1 × 128 × 384 3-dimensional array of floats
+    var last_hidden_stateShapedArray: MLShapedArray<Float> {
+        return MLShapedArray<Float>(self.last_hidden_state)
+    }
+
+    /// Last layer hidden-state of the first token of the sequence as 1 by 384 matrix of floats
+    var pooler_output: MLMultiArray {
+        return self.provider.featureValue(for: "pooler_output")!.multiArrayValue!
+    }
+
+    /// Last layer hidden-state of the first token of the sequence as 1 by 384 matrix of floats
+    var pooler_outputShapedArray: MLShapedArray<Float> {
+        return MLShapedArray<Float>(self.pooler_output)
+    }
+
+    var featureNames: Set<String> {
+        return self.provider.featureNames
+    }
+    
+    func featureValue(for featureName: String) -> MLFeatureValue? {
+        return self.provider.featureValue(for: featureName)
+    }
+
+    init(last_hidden_state: MLMultiArray, pooler_output: MLMultiArray) {
+        self.provider = try! MLDictionaryFeatureProvider(dictionary: ["last_hidden_state" : MLFeatureValue(multiArray: last_hidden_state), "pooler_output" : MLFeatureValue(multiArray: pooler_output)])
+    }
+
+    init(features: MLFeatureProvider) {
+        self.provider = features
+    }
+}
+
+
+/// Class for model loading and prediction
+@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
+class float32_model {
+    let model: MLModel
+
+    /// URL of model assuming it was installed in the same bundle as this class
+    class var urlOfModelInThisBundle : URL {
+        let bundle = Bundle(for: self)
+        return bundle.url(forResource: "float32_model", withExtension:"mlmodelc")!
+    }
+
+    /**
+        Construct float32_model instance with an existing MLModel object.
+
+        Usually the application does not use this initializer unless it makes a subclass of float32_model.
+        Such application may want to use `MLModel(contentsOfURL:configuration:)` and `float32_model.urlOfModelInThisBundle` to create a MLModel object to pass-in.
+
+        - parameters:
+          - model: MLModel object
+    */
+    init(model: MLModel) {
+        self.model = model
+    }
+
+    /**
+        Construct a model with configuration
+
+        - parameters:
+           - configuration: the desired model configuration
+
+        - throws: an NSError object that describes the problem
+    */
+    convenience init(configuration: MLModelConfiguration = MLModelConfiguration()) throws {
+        try self.init(contentsOf: type(of:self).urlOfModelInThisBundle, configuration: configuration)
+    }
+
+    /**
+        Construct float32_model instance with explicit path to mlmodelc file
+        - parameters:
+           - modelURL: the file url of the model
+
+        - throws: an NSError object that describes the problem
+    */
+    convenience init(contentsOf modelURL: URL) throws {
+        try self.init(model: MLModel(contentsOf: modelURL))
+    }
+
+    /**
+        Construct a model with URL of the .mlmodelc directory and configuration
+
+        - parameters:
+           - modelURL: the file url of the model
+           - configuration: the desired model configuration
+
+        - throws: an NSError object that describes the problem
+    */
+    convenience init(contentsOf modelURL: URL, configuration: MLModelConfiguration) throws {
+        try self.init(model: MLModel(contentsOf: modelURL, configuration: configuration))
+    }
+
+    /**
+        Construct float32_model instance asynchronously with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - configuration: the desired model configuration
+          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
+    */
+    class func load(configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<float32_model, Error>) -> Void) {
+        return self.load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration, completionHandler: handler)
+    }
+
+    /**
+        Construct float32_model instance asynchronously with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - configuration: the desired model configuration
+    */
+    class func load(configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> float32_model {
+        return try await self.load(contentsOf: self.urlOfModelInThisBundle, configuration: configuration)
+    }
+
+    /**
+        Construct float32_model instance asynchronously with URL of the .mlmodelc directory with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - modelURL: the URL to the model
+          - configuration: the desired model configuration
+          - handler: the completion handler to be called when the model loading completes successfully or unsuccessfully
+    */
+    class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration(), completionHandler handler: @escaping (Swift.Result<float32_model, Error>) -> Void) {
+        MLModel.load(contentsOf: modelURL, configuration: configuration) { result in
+            switch result {
+            case .failure(let error):
+                handler(.failure(error))
+            case .success(let model):
+                handler(.success(float32_model(model: model)))
+            }
+        }
+    }
+
+    /**
+        Construct float32_model instance asynchronously with URL of the .mlmodelc directory with optional configuration.
+
+        Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.
+
+        - parameters:
+          - modelURL: the URL to the model
+          - configuration: the desired model configuration
+    */
+    class func load(contentsOf modelURL: URL, configuration: MLModelConfiguration = MLModelConfiguration()) async throws -> float32_model {
+        let model = try await MLModel.load(contentsOf: modelURL, configuration: configuration)
+        return float32_model(model: model)
+    }
+
+    /**
+        Make a prediction using the structured interface
+
+        - parameters:
+           - input: the input to the prediction as float32_modelInput
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as float32_modelOutput
+    */
+    func prediction(input: float32_modelInput) throws -> float32_modelOutput {
+        return try self.prediction(input: input, options: MLPredictionOptions())
+    }
+
+    /**
+        Make a prediction using the structured interface
+
+        - parameters:
+           - input: the input to the prediction as float32_modelInput
+           - options: prediction options 
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as float32_modelOutput
+    */
+    func prediction(input: float32_modelInput, options: MLPredictionOptions) throws -> float32_modelOutput {
+        let outFeatures = try model.prediction(from: input, options: options)
+        return float32_modelOutput(features: outFeatures)
+    }
+
+    /**
+        Make an asynchronous prediction using the structured interface
+
+        - parameters:
+           - input: the input to the prediction as float32_modelInput
+           - options: prediction options 
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as float32_modelOutput
+    */
+    @available(macOS 14.0, iOS 17.0, tvOS 17.0, watchOS 10.0, *)
+    func prediction(input: float32_modelInput, options: MLPredictionOptions = MLPredictionOptions()) async throws -> float32_modelOutput {
+        let outFeatures = try await model.prediction(from: input, options: options)
+        return float32_modelOutput(features: outFeatures)
+    }
+
+    /**
+        Make a prediction using the convenience interface
+
+        - parameters:
+            - input_ids: Indices of input sequence tokens in the vocabulary as 1 by 128 matrix of 32-bit integers
+            - attention_mask: Mask to avoid performing attention on padding token indices (1 = not masked, 0 = masked) as 1 by 128 matrix of 32-bit integers
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as float32_modelOutput
+    */
+    func prediction(input_ids: MLMultiArray, attention_mask: MLMultiArray) throws -> float32_modelOutput {
+        let input_ = float32_modelInput(input_ids: input_ids, attention_mask: attention_mask)
+        return try self.prediction(input: input_)
+    }
+
+    /**
+        Make a prediction using the convenience interface
+
+        - parameters:
+            - input_ids: Indices of input sequence tokens in the vocabulary as 1 by 128 matrix of 32-bit integers
+            - attention_mask: Mask to avoid performing attention on padding token indices (1 = not masked, 0 = masked) as 1 by 128 matrix of 32-bit integers
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as float32_modelOutput
+    */
+    func prediction(input_ids: MLShapedArray<Int32>, attention_mask: MLShapedArray<Int32>) throws -> float32_modelOutput {
+        let input_ = float32_modelInput(input_ids: input_ids, attention_mask: attention_mask)
+        return try self.prediction(input: input_)
+    }
+
+    /**
+        Make a batch prediction using the structured interface
+
+        - parameters:
+           - inputs: the inputs to the prediction as [float32_modelInput]
+           - options: prediction options 
+
+        - throws: an NSError object that describes the problem
+
+        - returns: the result of the prediction as [float32_modelOutput]
+    */
+    func predictions(inputs: [float32_modelInput], options: MLPredictionOptions = MLPredictionOptions()) throws -> [float32_modelOutput] {
+        let batchIn = MLArrayBatchProvider(array: inputs)
+        let batchOut = try model.predictions(from: batchIn, options: options)
+        var results : [float32_modelOutput] = []
+        results.reserveCapacity(inputs.count)
+        for i in 0..<batchOut.count {
+            let outProvider = batchOut.features(at: i)
+            let result = float32_modelOutput(features: outProvider)
+            results.append(result)
+        }
+        return results
+    }
+}
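+
+// Illustrative end-to-end sketch (not part of the generated interface): load the
+// compiled model from the bundle and run one prediction, using the hypothetical
+// makeExampleInput(from:) helper sketched above. Returning the raw hidden states
+// (rather than pooler_output) is an assumption for this example only.
+@available(macOS 12.0, iOS 15.0, tvOS 15.0, watchOS 8.0, *)
+func exampleFloat32ModelHiddenStates(tokenIds: [Int32]) throws -> MLShapedArray<Float> {
+    // Loads float32_model.mlmodelc from the bundle containing the generated class.
+    let model = try float32_model(configuration: MLModelConfiguration())
+    // Run the synchronous structured prediction interface.
+    let output = try model.prediction(input: makeExampleInput(from: tokenIds))
+    // 1 x 128 x 384 hidden states; callers typically pool these into a sentence embedding.
+    return output.last_hidden_stateShapedArray
+}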
diff --git a/Tests/SwiftNLPTests/AllMiniLM_sampleTest.swift b/Tests/SwiftNLPTests/AllMiniLM_sampleTest.swift
index 64e401f15040051663fc0fbbe638930d9ff4b247..c44b98da1aa4949cb15cfd6eb88de7dad84a7fa0 100644
--- a/Tests/SwiftNLPTests/AllMiniLM_sampleTest.swift
+++ b/Tests/SwiftNLPTests/AllMiniLM_sampleTest.swift
@@ -34,7 +34,7 @@ final class BERT_test: XCTestCase {
         var query_embedding: [Float] = []
         var embedding_dim: Int = 384
        
-        var model = MiniLMEmbeddings()
+        var model = MiniLMEmbeddings(model_type: "all_miniLM_L6_v2")
        
         query_embedding = await model.encode(sentence: query[0])!