llama.cpp LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2

  • 1. `LLM_ARCH_DEEPSEEK` and `LLM_ARCH_DEEPSEEK2` in `llm_arch` and `LLM_ARCH_NAMES`
  • 2. `LLM_ARCH_DEEPSEEK` and `LLM_ARCH_DEEPSEEK2` in `LLM_TENSOR_NAMES`
  • 3. `struct ggml_cgraph * build_deepseek()` and `struct ggml_cgraph * build_deepseek2()`
  • References

It is not appropriate to hype Chinese large language models while at the same time belittling American large language models.

Water is the main chemical component of the human body, accounting for roughly 50% to 70% of body weight. Large language models carry no small amount of "water" (hype) either.

llama.cpp
https://github.com/ggerganov/llama.cpp

1. LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2 in llm_arch and LLM_ARCH_NAMES

/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama-arch.h
/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama-arch.cpp

  • LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_DECI,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GROK,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_JINA_BERT_V2,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_QWEN2MOE,
    LLM_ARCH_QWEN2VL,
    LLM_ARCH_PHI2,
    LLM_ARCH_PHI3,
    LLM_ARCH_PHIMOE,
    LLM_ARCH_PLAMO,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_MINICPM3,
    LLM_ARCH_GEMMA,
    LLM_ARCH_GEMMA2,
    LLM_ARCH_STARCODER2,
    LLM_ARCH_MAMBA,
    LLM_ARCH_XVERSE,
    LLM_ARCH_COMMAND_R,
    LLM_ARCH_COHERE2,
    LLM_ARCH_DBRX,
    LLM_ARCH_OLMO,
    LLM_ARCH_OLMO2,
    LLM_ARCH_OLMOE,
    LLM_ARCH_OPENELM,
    LLM_ARCH_ARCTIC,
    LLM_ARCH_DEEPSEEK,
    LLM_ARCH_DEEPSEEK2,
    LLM_ARCH_CHATGLM,
    LLM_ARCH_BITNET,
    LLM_ARCH_T5,
    LLM_ARCH_T5ENCODER,
    LLM_ARCH_JAIS,
    LLM_ARCH_NEMOTRON,
    LLM_ARCH_EXAONE,
    LLM_ARCH_RWKV6,
    LLM_ARCH_RWKV6QWEN2,
    LLM_ARCH_GRANITE,
    LLM_ARCH_GRANITE_MOE,
    LLM_ARCH_CHAMELEON,
    LLM_ARCH_WAVTOKENIZER_DEC,
    LLM_ARCH_UNKNOWN,
};
  • { LLM_ARCH_DEEPSEEK, "deepseek" } and { LLM_ARCH_DEEPSEEK2, "deepseek2" }
static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA,            "llama"            },
    { LLM_ARCH_DECI,             "deci"             },
    { LLM_ARCH_FALCON,           "falcon"           },
    { LLM_ARCH_GROK,             "grok"             },
    { LLM_ARCH_GPT2,             "gpt2"             },
    { LLM_ARCH_GPTJ,             "gptj"             },
    { LLM_ARCH_GPTNEOX,          "gptneox"          },
    { LLM_ARCH_MPT,              "mpt"              },
    { LLM_ARCH_BAICHUAN,         "baichuan"         },
    { LLM_ARCH_STARCODER,        "starcoder"        },
    { LLM_ARCH_REFACT,           "refact"           },
    { LLM_ARCH_BERT,             "bert"             },
    { LLM_ARCH_NOMIC_BERT,       "nomic-bert"       },
    { LLM_ARCH_JINA_BERT_V2,     "jina-bert-v2"     },
    { LLM_ARCH_BLOOM,            "bloom"            },
    { LLM_ARCH_STABLELM,         "stablelm"         },
    { LLM_ARCH_QWEN,             "qwen"             },
    { LLM_ARCH_QWEN2,            "qwen2"            },
    { LLM_ARCH_QWEN2MOE,         "qwen2moe"         },
    { LLM_ARCH_QWEN2VL,          "qwen2vl"          },
    { LLM_ARCH_PHI2,             "phi2"             },
    { LLM_ARCH_PHI3,             "phi3"             },
    { LLM_ARCH_PHIMOE,           "phimoe"           },
    { LLM_ARCH_PLAMO,            "plamo"            },
    { LLM_ARCH_CODESHELL,        "codeshell"        },
    { LLM_ARCH_ORION,            "orion"            },
    { LLM_ARCH_INTERNLM2,        "internlm2"        },
    { LLM_ARCH_MINICPM,          "minicpm"          },
    { LLM_ARCH_MINICPM3,         "minicpm3"         },
    { LLM_ARCH_GEMMA,            "gemma"            },
    { LLM_ARCH_GEMMA2,           "gemma2"           },
    { LLM_ARCH_STARCODER2,       "starcoder2"       },
    { LLM_ARCH_MAMBA,            "mamba"            },
    { LLM_ARCH_XVERSE,           "xverse"           },
    { LLM_ARCH_COMMAND_R,        "command-r"        },
    { LLM_ARCH_COHERE2,          "cohere2"          },
    { LLM_ARCH_DBRX,             "dbrx"             },
    { LLM_ARCH_OLMO,             "olmo"             },
    { LLM_ARCH_OLMO2,            "olmo2"            },
    { LLM_ARCH_OLMOE,            "olmoe"            },
    { LLM_ARCH_OPENELM,          "openelm"          },
    { LLM_ARCH_ARCTIC,           "arctic"           },
    { LLM_ARCH_DEEPSEEK,         "deepseek"         },
    { LLM_ARCH_DEEPSEEK2,        "deepseek2"        },
    { LLM_ARCH_CHATGLM,          "chatglm"          },
    { LLM_ARCH_BITNET,           "bitnet"           },
    { LLM_ARCH_T5,               "t5"               },
    { LLM_ARCH_T5ENCODER,        "t5encoder"        },
    { LLM_ARCH_JAIS,             "jais"             },
    { LLM_ARCH_NEMOTRON,         "nemotron"         },
    { LLM_ARCH_EXAONE,           "exaone"           },
    { LLM_ARCH_RWKV6,            "rwkv6"            },
    { LLM_ARCH_RWKV6QWEN2,       "rwkv6qwen2"       },
    { LLM_ARCH_GRANITE,          "granite"          },
    { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
    { LLM_ARCH_CHAMELEON,        "chameleon"        },
    { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
    { LLM_ARCH_UNKNOWN,          "(unknown)"        },
};
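With the enum and the name table in place, llama.cpp can translate between the `general.architecture` string stored in GGUF metadata and the internal `llm_arch` value (llama-arch.cpp performs the reverse lookup in `llm_arch_from_string()`). Below is a minimal, self-contained sketch of that round trip, using a trimmed two-entry table instead of the real one:

#include <cstdio>
#include <map>
#include <string>

// trimmed stand-ins for llama.cpp's llm_arch / LLM_ARCH_NAMES
enum llm_arch { LLM_ARCH_DEEPSEEK, LLM_ARCH_DEEPSEEK2, LLM_ARCH_UNKNOWN };

static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_DEEPSEEK,  "deepseek"  },
    { LLM_ARCH_DEEPSEEK2, "deepseek2" },
    { LLM_ARCH_UNKNOWN,   "(unknown)" },
};

// reverse lookup: architecture string from GGUF metadata -> enum value
static llm_arch llm_arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) {
        if (kv.second == name) {
            return kv.first;
        }
    }
    return LLM_ARCH_UNKNOWN;
}

int main() {
    const llm_arch arch = llm_arch_from_string("deepseek2");
    std::printf("arch = %s\n", LLM_ARCH_NAMES.at(arch)); // prints: arch = deepseek2
    return 0;
}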

2. LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2 in LLM_TENSOR_NAMES

/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama-arch.cpp

  • LLM_ARCH_DEEPSEEK and LLM_ARCH_DEEPSEEK2
static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
    // ... entries for LLM_ARCH_LLAMA through LLM_ARCH_ARCTIC omitted ...
    {
        LLM_ARCH_DEEPSEEK,
        {
            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
            { LLM_TENSOR_OUTPUT,             "output" },
            { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD,      "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
        },
    },
    {
        LLM_ARCH_DEEPSEEK2,
        {
            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
            { LLM_TENSOR_OUTPUT,             "output" },
            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
        },
    },
    // ... entries for LLM_ARCH_CHATGLM through LLM_ARCH_UNKNOWN omitted ...
};
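The per-block names in `LLM_TENSOR_NAMES` are printf-style patterns: the `%d` is replaced with the block index when tensors are looked up in the GGUF file, so `blk.%d.attn_kv_a_mqa` expands to `blk.0.attn_kv_a_mqa`, `blk.1.attn_kv_a_mqa`, and so on. A minimal sketch of that expansion (the helper below is a hypothetical illustration, not llama.cpp's actual API):

#include <cstdio>

// expand a "blk.%d.*" tensor-name pattern for one block index
static void print_tensor_name(const char * pattern, int il) {
    char buf[256];
    std::snprintf(buf, sizeof(buf), pattern, il);
    std::printf("%s\n", buf);
}

int main() {
    print_tensor_name("blk.%d.attn_kv_a_mqa", 0); // -> blk.0.attn_kv_a_mqa
    print_tensor_name("blk.%d.ffn_gate_exps", 3); // -> blk.3.ffn_gate_exps
    return 0;
}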

3. struct ggml_cgraph * build_deepseek() and struct ggml_cgraph * build_deepseek2()

/home/yongqiang/llm_work/llama_cpp_25_01_05/llama.cpp/src/llama.cpp

  • struct ggml_cgraph * build_deepseek()
struct ggml_cgraph * build_deepseek() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);

    // mutable variable, needed during the last layer of the computation to skip unused tokens
    int32_t n_tokens = this->n_tokens;

    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;

    inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);

    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();

    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;

    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // rope freq factors for llama3; may return nullptr for llama2 and other models
            struct ggml_tensor * rope_factors = build_rope_factors(il);

            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }

            struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }

            struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }

            Qcur = ggml_rope_ext(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow);
            cb(Qcur, "Qcur", il);

            Kcur = ggml_rope_ext(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow);
            cb(Kcur, "Kcur", il);

            cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
        }

        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            n_tokens = n_outputs;
            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = llm_build_norm(ctx0, ffn_inp, hparams,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "ffn_norm", il);

        if ((uint32_t) il < hparams.n_layer_dense_lead) {
            cur = llm_build_ffn(ctx0, lctx, cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            ggml_tensor * moe_out =
                llm_build_moe_ffn(ctx0, lctx, cur,
                        model.layers[il].ffn_gate_inp,
                        model.layers[il].ffn_up_exps,
                        model.layers[il].ffn_gate_exps,
                        model.layers[il].ffn_down_exps,
                        nullptr,
                        n_expert, n_expert_used,
                        LLM_FFN_SILU, false,
                        false, hparams.expert_weights_scale,
                        LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                        cb, il);
            cb(moe_out, "ffn_moe_out", il);

            // FFN shared expert
            {
                ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur,
                        model.layers[il].ffn_up_shexp,   NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }

        cur = ggml_add(ctx0, cur, ffn_inp);
        cur = lctx.cvec.apply_to(ctx0, cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);

    // lm_head
    cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
    cb(cur, "result_output", -1);

    ggml_build_forward_expand(gf, cur);

    return gf;
}
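One detail worth calling out in `build_deepseek()` is the `(uint32_t) il < hparams.n_layer_dense_lead` branch: the first `n_layer_dense_lead` blocks use an ordinary dense SwiGLU FFN, while every later block takes the MoE path (routed experts plus a shared expert whose output is added to the routed result). A standalone sketch of that routing decision; the layer counts below are hypothetical values for illustration only, the real ones come from the model's GGUF hparams:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t n_layer            = 28; // hypothetical
    const uint32_t n_layer_dense_lead = 1;  // hypothetical: keep the first block dense

    for (uint32_t il = 0; il < n_layer; ++il) {
        if (il < n_layer_dense_lead) {
            std::printf("blk.%u: dense FFN\n", il);
        } else {
            std::printf("blk.%u: MoE FFN (routed experts + shared expert)\n", il);
        }
    }
    return 0;
}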
  • struct ggml_cgraph * build_deepseek2()
    struct ggml_cgraph * build_deepseek2() {struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);// mutable variable, needed during the last layer of the computation to skip unused tokensint32_t n_tokens = this->n_tokens;bool is_lite = (hparams.n_layer == 27);// We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.// See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(hparams.n_embd_head_k));const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));const uint32_t n_embd_head_qk_rope = hparams.n_rot;const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;const uint32_t kv_lora_rank = hparams.n_lora_kv;struct ggml_tensor * cur;struct ggml_tensor * inpL;// {n_embd, n_tokens}inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);// inp_pos - contains the positionsstruct ggml_tensor * inp_pos = build_inp_pos();// KQ_mask (mask for 1 head, it will be broadcasted to all heads)struct ggml_tensor * KQ_mask = build_inp_KQ_mask();for (int il = 0; il < n_layer; ++il) {struct ggml_tensor * inpSA = inpL;// normcur = llm_build_norm(ctx0, inpL, hparams,model.layers[il].attn_norm, NULL,LLM_NORM_RMS, cb, il);cb(cur, "attn_norm", il);// self_attention{struct ggml_tensor * q = NULL;if (!is_lite) {// {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);cb(q, "q", il);q = llm_build_norm(ctx0, q, hparams,model.layers[il].attn_q_a_norm, NULL,LLM_NORM_RMS, cb, il);cb(q, "q", il);// {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);cb(q, "q", il);} else {q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);cb(q, "q", il);}// split into {n_head * n_embd_head_qk_nope, n_tokens}struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,ggml_row_size(q->type, hparams.n_embd_head_k),ggml_row_size(q->type, hparams.n_embd_head_k * n_head),0);cb(q_nope, "q_nope", il);// and {n_head * n_embd_head_qk_rope, n_tokens}struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,ggml_row_size(q->type, hparams.n_embd_head_k),ggml_row_size(q->type, hparams.n_embd_head_k * n_head),ggml_row_size(q->type, n_embd_head_qk_nope));cb(q_pe, "q_pe", il);// {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);cb(kv_pe_compresseed, "kv_pe_compresseed", il);// split into {kv_lora_rank, n_tokens}struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens,kv_pe_compresseed->nb[1],0);cb(kv_compressed, "kv_compressed", il);// and {n_embd_head_qk_rope, n_tokens}struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens,kv_pe_compresseed->nb[1],kv_pe_compresseed->nb[1],ggml_row_size(kv_pe_compresseed->type, kv_lora_rank));cb(k_pe, "k_pe", il);kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous normkv_compressed = llm_build_norm(ctx0, kv_compressed, hparams,model.layers[il].attn_kv_a_norm, NULL,LLM_NORM_RMS, cb, il);cb(kv_compressed, 
"kv_compressed", il);// {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);cb(kv, "kv", il);// split into {n_head * n_embd_head_qk_nope, n_tokens}struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),0);cb(k_nope, "k_nope", il);// and {n_head * n_embd_head_v, n_tokens}struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),ggml_row_size(kv->type, (n_embd_head_qk_nope)));cb(v_states, "v_states", il);v_states = ggml_cont(ctx0, v_states);cb(v_states, "v_states", il);v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),0);cb(v_states, "v_states", il);q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing thisq_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr,n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,ext_factor, attn_factor_scaled, beta_fast, beta_slow);cb(q_pe, "q_pe", il);// shared RoPE keyk_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing thisk_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr,n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,ext_factor, attn_factor_scaled, beta_fast, beta_slow);cb(k_pe, "k_pe", il);struct ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);cb(q_states, "q_states", il);struct ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);cb(k_states, "k_states", il);cur = llm_build_kv(ctx0, lctx, kv_self, gf,model.layers[il].wo, NULL,k_states, v_states, q_states, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);}if (il == n_layer - 1) {// skip computing output for unused tokensstruct ggml_tensor * inp_out_ids = build_inp_out_ids();n_tokens = n_outputs;cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);}struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);cb(ffn_inp, "ffn_inp", il);cur = llm_build_norm(ctx0, ffn_inp, hparams,model.layers[il].ffn_norm, NULL,LLM_NORM_RMS, cb, il);cb(cur, "ffn_norm", il);if ((uint32_t) il < hparams.n_layer_dense_lead) {cur = llm_build_ffn(ctx0, lctx, cur,model.layers[il].ffn_up,   NULL, NULL,model.layers[il].ffn_gate, NULL, NULL,model.layers[il].ffn_down, NULL, NULL,NULL,LLM_FFN_SILU, LLM_FFN_PAR, cb, il);cb(cur, "ffn_out", il);} else {// MoE branchggml_tensor * moe_out =llm_build_moe_ffn(ctx0, lctx, cur,model.layers[il].ffn_gate_inp,model.layers[il].ffn_up_exps,model.layers[il].ffn_gate_exps,model.layers[il].ffn_down_exps,model.layers[il].ffn_exp_probs_b,n_expert, n_expert_used,LLM_FFN_SILU, hparams.expert_weights_norm,true, hparams.expert_weights_scale,(enum llama_expert_gating_func_type) hparams.expert_gating_func,cb, il);cb(moe_out, "ffn_moe_out", il);// FFN shared expert{ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur,model.layers[il].ffn_up_shexp,   NULL, NULL,model.layers[il].ffn_gate_shexp, NULL, NULL,model.layers[il].ffn_down_shexp, NULL, NULL,NULL,LLM_FFN_SILU, LLM_FFN_PAR, 
cb, il);cb(ffn_shexp, "ffn_shexp", il);cur = ggml_add(ctx0, moe_out, ffn_shexp);cb(cur, "ffn_out", il);}}cur = ggml_add(ctx0, cur, ffn_inp);cur = lctx.cvec.apply_to(ctx0, cur, il);cb(cur, "l_out", il);// input for next layerinpL = cur;}cur = inpL;cur = llm_build_norm(ctx0, cur, hparams,model.output_norm, NULL,LLM_NORM_RMS, cb, -1);cb(cur, "result_norm", -1);// lm_headcur = ggml_mul_mat(ctx0, model.output, cur);cb(cur, "result_output", -1);ggml_build_forward_expand(gf, cur);return gf;}
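The `wkv_b` projection expands the compressed KV latent back into per-head no-position keys (`k_nope`) and values, packed head by head in one tensor; the two `ggml_view_3d` calls then slice that packed layout apart with byte strides instead of copying. The following is a minimal, self-contained sketch of the same view/stride trick, not the llama.cpp code itself; the toy sizes `d_nope`, `d_v`, `n_head`, and `n_tokens` are made-up assumptions for illustration:

// split_view_sketch.c - illustrative only; assumes the ggml C API
#include <stdio.h>
#include "ggml.h"

int main(void) {
    const int64_t d_nope = 4, d_v = 2, n_head = 3, n_tokens = 5;

    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // packed tensor: each row holds n_head blocks of (d_nope + d_v) floats
    struct ggml_tensor * kv = ggml_new_tensor_2d(ctx, GGML_TYPE_F32,
                                                 n_head * (d_nope + d_v), n_tokens);

    // {d_nope, n_head, n_tokens}: nb1 steps over one head block, nb2 over one row
    struct ggml_tensor * k_nope = ggml_view_3d(ctx, kv, d_nope, n_head, n_tokens,
            ggml_row_size(kv->type, d_nope + d_v),
            ggml_row_size(kv->type, n_head * (d_nope + d_v)),
            0);

    // {d_v, n_head, n_tokens}: same strides, offset past the d_nope part of each block
    struct ggml_tensor * v = ggml_view_3d(ctx, kv, d_v, n_head, n_tokens,
            ggml_row_size(kv->type, d_nope + d_v),
            ggml_row_size(kv->type, n_head * (d_nope + d_v)),
            ggml_row_size(kv->type, d_nope));

    printf("k_nope: %lld x %lld x %lld\n", (long long) k_nope->ne[0],
           (long long) k_nope->ne[1], (long long) k_nope->ne[2]);
    printf("v:      %lld x %lld x %lld\n", (long long) v->ne[0],
           (long long) v->ne[1], (long long) v->ne[2]);

    ggml_free(ctx);
    return 0;
}

Because a view only records strides and an offset, the real code has to call `ggml_cont` on `v_states` before reshaping it with `ggml_view_2d` into the contiguous 2-D layout the attention path expects.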
  • case LLM_ARCH_DEEPSEEK: and case LLM_ARCH_DEEPSEEK2:
    switch (model.arch) {
        case LLM_ARCH_LLAMA:
        case LLM_ARCH_MINICPM:
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
            { result = llm.build_llama();            } break;
        case LLM_ARCH_DECI:             { result = llm.build_deci();             } break;
        case LLM_ARCH_BAICHUAN:         { result = llm.build_baichuan();         } break;
        case LLM_ARCH_FALCON:           { result = llm.build_falcon();           } break;
        case LLM_ARCH_GROK:             { result = llm.build_grok();             } break;
        case LLM_ARCH_STARCODER:        { result = llm.build_starcoder();        } break;
        case LLM_ARCH_REFACT:           { result = llm.build_refact();           } break;
        case LLM_ARCH_BERT:
        case LLM_ARCH_JINA_BERT_V2:
        case LLM_ARCH_NOMIC_BERT:
            { result = llm.build_bert();             } break;
        case LLM_ARCH_BLOOM:            { result = llm.build_bloom();            } break;
        case LLM_ARCH_MPT:              { result = llm.build_mpt();              } break;
        case LLM_ARCH_STABLELM:         { result = llm.build_stablelm();         } break;
        case LLM_ARCH_QWEN:             { result = llm.build_qwen();             } break;
        case LLM_ARCH_QWEN2:            { result = llm.build_qwen2();            } break;
        case LLM_ARCH_QWEN2VL:          { lctx.n_pos_per_token = 4; result = llm.build_qwen2vl(); } break;
        case LLM_ARCH_QWEN2MOE:         { result = llm.build_qwen2moe();         } break;
        case LLM_ARCH_PHI2:             { result = llm.build_phi2();             } break;
        case LLM_ARCH_PHI3:
        case LLM_ARCH_PHIMOE:
            { result = llm.build_phi3();             } break;
        case LLM_ARCH_PLAMO:            { result = llm.build_plamo();            } break;
        case LLM_ARCH_GPT2:             { result = llm.build_gpt2();             } break;
        case LLM_ARCH_CODESHELL:        { result = llm.build_codeshell();        } break;
        case LLM_ARCH_ORION:            { result = llm.build_orion();            } break;
        case LLM_ARCH_INTERNLM2:        { result = llm.build_internlm2();        } break;
        case LLM_ARCH_MINICPM3:         { result = llm.build_minicpm3();         } break;
        case LLM_ARCH_GEMMA:            { result = llm.build_gemma();            } break;
        case LLM_ARCH_GEMMA2:           { result = llm.build_gemma2();           } break;
        case LLM_ARCH_STARCODER2:       { result = llm.build_starcoder2();       } break;
        case LLM_ARCH_MAMBA:            { result = llm.build_mamba();            } break;
        case LLM_ARCH_XVERSE:           { result = llm.build_xverse();           } break;
        case LLM_ARCH_COMMAND_R:        { result = llm.build_command_r();        } break;
        case LLM_ARCH_COHERE2:          { result = llm.build_cohere2();          } break;
        case LLM_ARCH_DBRX:             { result = llm.build_dbrx();             } break;
        case LLM_ARCH_OLMO:             { result = llm.build_olmo();             } break;
        case LLM_ARCH_OLMO2:            { result = llm.build_olmo2();            } break;
        case LLM_ARCH_OLMOE:            { result = llm.build_olmoe();            } break;
        case LLM_ARCH_OPENELM:          { result = llm.build_openelm();          } break;
        case LLM_ARCH_GPTNEOX:          { result = llm.build_gptneox();          } break;
        case LLM_ARCH_ARCTIC:           { result = llm.build_arctic();           } break;
        case LLM_ARCH_DEEPSEEK:         { result = llm.build_deepseek();         } break;
        case LLM_ARCH_DEEPSEEK2:        { result = llm.build_deepseek2();        } break;
        case LLM_ARCH_CHATGLM:          { result = llm.build_chatglm();          } break;
        case LLM_ARCH_BITNET:           { result = llm.build_bitnet();           } break;
        case LLM_ARCH_T5:
            {
                if (lctx.is_encoding) {
                    result = llm.build_t5_enc();
                } else {
                    result = llm.build_t5_dec();
                }
            } break;
        case LLM_ARCH_T5ENCODER:        { result = llm.build_t5_enc();           } break;
        case LLM_ARCH_JAIS:             { result = llm.build_jais();             } break;
        case LLM_ARCH_NEMOTRON:         { result = llm.build_nemotron();         } break;
        case LLM_ARCH_EXAONE:           { result = llm.build_exaone();           } break;
        case LLM_ARCH_RWKV6:            { result = llm.build_rwkv6();            } break;
        case LLM_ARCH_RWKV6QWEN2:       { result = llm.build_rwkv6qwen2();       } break;
        case LLM_ARCH_CHAMELEON:        { result = llm.build_chameleon();        } break;
        case LLM_ARCH_WAVTOKENIZER_DEC: { result = llm.build_wavtokenizer_dec(); } break;
        default:
            GGML_ABORT("fatal error");
    }
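`LLM_ARCH_DEEPSEEK` and `LLM_ARCH_DEEPSEEK2` dispatch to `build_deepseek()` and `build_deepseek2()` respectively, and any architecture missing from the switch falls through to `GGML_ABORT("fatal error")`. The sketch below shows the same idea in miniature: resolve an architecture name to a graph builder and fail loudly otherwise. It is illustrative only; the map, the stub builders, and their names are assumptions, not llama.cpp's API:

// dispatch_sketch.cpp - illustrative only
#include <functional>
#include <iostream>
#include <map>
#include <string>

static void build_deepseek()  { std::cout << "build deepseek graph\n";  }
static void build_deepseek2() { std::cout << "build deepseek2 graph\n"; }

int main() {
    const std::map<std::string, std::function<void()>> builders = {
        { "deepseek",  build_deepseek  },
        { "deepseek2", build_deepseek2 },
    };

    const std::string arch = "deepseek2";

    auto it = builders.find(arch);
    if (it == builders.end()) {
        std::cerr << "fatal error: unknown architecture\n"; // analogous to GGML_ABORT
        return 1;
    }
    it->second(); // selects the matching graph builder
    return 0;
}

In llama.cpp itself, `model.arch` is resolved from the gguf `general.architecture` string at load time, before the graph-builder switch is ever reached.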

References

[1] Yongqiang Cheng, https://yongqiang.blog.csdn.net/
[2] huggingface/gguf, https://github.com/huggingface/huggingface.js/tree/main/packages/gguf
