diff --git a/Model_Architecture_Discussions/MiniCPM/MiniCPM.ipynb b/Model_Architecture_Discussions/MiniCPM/MiniCPM.ipynb
new file mode 100644
index 0000000..020f399
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/MiniCPM.ipynb
@@ -0,0 +1,1065 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "/Users/zhihu123/anaconda3/envs/aiLLM/bin/python\n"
+ ]
+ }
+ ],
+ "source": [
+ "!which python"
+ ]
+ },
+ {
+ "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+ "source": [
+ "import math\n",
+ "import warnings\n",
+ "from typing import List, Optional, Tuple, Union, Dict\n",
+ "from collections import OrderedDict, UserDict\n",
+ "\n",
+ "import torch\n",
+ "import torch.nn.functional as F\n",
+ "import torch.utils.checkpoint\n",
+ "from torch import nn\n",
+ "from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n",
+    "from transformers.cache_utils import Cache, DynamicCache\n",
+    "from transformers.modeling_attn_mask_utils import AttentionMaskConverter\n",
+    "\n",
+    "# the notebook sits next to configuration_minicpm.py, so a plain import works\n",
+    "# (the relative import used in the original file only works inside a package)\n",
+    "from configuration_minicpm import MiniCPMConfig\n",
+    "import re\n",
+    "\n",
+    "import logging\n",
+ "\n",
+ "logger = logging.getLogger(__name__)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "MiniCPM uses a standard decoder-only Transformer architecture with three main components: the embedding layer, the attention layers, and the MLP layers. We take each part apart to better understand how it works. The code is adapted from the [official MiniCPM repository](https://github.com/OpenBMB/MiniCPM); here we rebuild the model step by step."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### RoPE"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "For positional information, MiniCPM uses RoPE (Rotary Position Embedding), a relative position encoding that helps the model exploit token order. The core idea of RoPE is to encode the absolute position $m$ of a token as a rotation of its query/key vector, so that the inner product between a query at position $m$ and a key at position $n$ depends only on the relative offset $m-n$. For each dimension pair $(2i, 2i+1)$ the rotation is:\n",
+    "\n",
+    "$$\\begin{pmatrix} x'_{2i} \\\\ x'_{2i+1} \\end{pmatrix} = \\begin{pmatrix} \\cos m\\theta_i & -\\sin m\\theta_i \\\\ \\sin m\\theta_i & \\cos m\\theta_i \\end{pmatrix} \\begin{pmatrix} x_{2i} \\\\ x_{2i+1} \\end{pmatrix}, \\qquad \\theta_i = \\mathrm{base}^{-2i/d}$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "`MiniCPMRotaryEmbedding` implements rotary position embedding. It precomputes and caches the cosine and sine tables of the rotary encoding so they can be fetched quickly during the forward pass."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class MiniCPMRotaryEmbedding(nn.Module):\n",
+ " def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n",
+ " super().__init__()\n",
+ "\n",
+ " self.dim = dim\n",
+ " self.max_position_embeddings = max_position_embeddings\n",
+ " self.base = base\n",
+    "        # compute the inverse frequencies inv_freq and register them as a (non-persistent) buffer\n",
+ " inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))\n",
+ " self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n",
+ "\n",
+ " # Build here to make `torch.jit.trace` work.\n",
+ " self._set_cos_sin_cache(\n",
+ " # seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()\n",
+ " seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.float32\n",
+ " )\n",
+ "\n",
+ " def _set_cos_sin_cache(self, seq_len, device, dtype):\n",
+    "        # precompute and cache the cosine and sine tables\n",
+ " self.max_seq_len_cached = seq_len\n",
+ " t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)\n",
+ " freqs = torch.outer(t, self.inv_freq)\n",
+ " # Different from paper, but it uses a different permutation in order to obtain the same calculation\n",
+ " emb = torch.cat((freqs, freqs), dim=-1)\n",
+ "\n",
+ " self.register_buffer(\"cos_cached\", emb.cos().to(dtype), persistent=False)\n",
+ " self.register_buffer(\"sin_cached\", emb.sin().to(dtype), persistent=False)\n",
+ "\n",
+ " def forward(self, x, seq_len=None):\n",
+    "        # if the requested length exceeds the cached maximum, recompute and re-cache the cos/sin tables, then return the slices matching seq_len\n",
+ " # x: [bs, num_attention_heads, seq_len, head_size]\n",
+ " if seq_len > self.max_seq_len_cached:\n",
+ " self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)\n",
+ "\n",
+ " return (\n",
+ " self.cos_cached[:seq_len].to(dtype=x.dtype),\n",
+ " self.sin_cached[:seq_len].to(dtype=x.dtype),\n",
+ " )"
+ ]
+ },
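+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick sanity check (a minimal sketch, assuming the cells above have been run), we can instantiate the rotary embedding with MiniCPM's head size of 64 and inspect the cached tables. Position 0 is rotated by angle 0, so its cosine row is all ones:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# shape check for the rotary embedding cache (toy sizes)\n",
+    "rope = MiniCPMRotaryEmbedding(dim=64, max_position_embeddings=2048)\n",
+    "dummy = torch.zeros(1, 36, 10, 64)  # [bs, num_attention_heads, seq_len, head_size]\n",
+    "cos, sin = rope(dummy, seq_len=10)\n",
+    "print(cos.shape, sin.shape)  # torch.Size([10, 64]) torch.Size([10, 64])\n",
+    "print(cos[0])  # all ones: position 0 is rotated by angle 0"
+   ]
+  },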
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "This helper rotates half of the hidden dimensions of its input: the second half is negated and moved to the front, and the first half is moved to the back."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def rotate_half(x):\n",
+    "    \"\"\"Rotates half the hidden dims of the input.\"\"\"\n",
+ " x1 = x[..., : x.shape[-1] // 2]\n",
+ " x2 = x[..., x.shape[-1] // 2 :]\n",
+ " return torch.cat((-x2, x1), dim=-1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "This function applies rotary position embedding to the query and key tensors. It first records the key tensor's dtype, gathers the cosine and sine tables at the given position indices and expands them along the head dimension, casts the queries and keys to float32, applies the rotation, and finally casts the rotated queries and keys back to their original dtype before returning them."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):\n",
+    "    orig_dtype = k.dtype\n",
+    "    cos = cos[position_ids].unsqueeze(unsqueeze_dim)  # [bs, 1, seq_len, dim]: broadcastable against q/k of shape [bs, num_heads, q_len, head_dim]\n",
+    "    sin = sin[position_ids].unsqueeze(unsqueeze_dim)  # [bs, 1, seq_len, dim]\n",
+ " q_fp32 = q.to(dtype=torch.float32, device=q.device)\n",
+ " k_fp32 = k.to(dtype=torch.float32, device=k.device)\n",
+ " q_embed = (q_fp32 * cos) + (rotate_half(q_fp32) * sin)\n",
+ " k_embed = (k_fp32 * cos) + (rotate_half(k_fp32) * sin)\n",
+ " return q_embed.to(dtype=orig_dtype), k_embed.to(dtype=orig_dtype) # [bs, num_heads, q_len, head_dim]"
+ ]
+ },
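+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The defining property of RoPE is that the inner product between a rotated query and a rotated key depends only on the relative offset between their positions. Below is a minimal numerical check of that property (a sketch, assuming the cells above have been run): positions with offset $3-1=2$ and $10-8=2$ produce the same score, while a different offset produces a different one."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# verify that <RoPE(q, m), RoPE(k, n)> depends only on m - n\n",
+    "torch.manual_seed(0)\n",
+    "rope = MiniCPMRotaryEmbedding(dim=8, max_position_embeddings=32)\n",
+    "q = torch.randn(1, 1, 1, 8)  # [bs, num_heads, q_len, head_dim], a single token\n",
+    "k = torch.randn(1, 1, 1, 8)\n",
+    "cos, sin = rope(q, seq_len=32)\n",
+    "\n",
+    "def rotated_dot(m, n):\n",
+    "    qm, _ = apply_rotary_pos_emb(q, q, cos, sin, torch.tensor([[m]]))\n",
+    "    _, kn = apply_rotary_pos_emb(k, k, cos, sin, torch.tensor([[n]]))\n",
+    "    return (qm * kn).sum().item()\n",
+    "\n",
+    "print(rotated_dot(3, 1), rotated_dot(10, 8))  # same offset 2 -> (almost) identical scores\n",
+    "print(rotated_dot(10, 5))  # offset 5 -> a different score"
+   ]
+  },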
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n",
+ " \"\"\"\n",
+ " This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n",
+ " num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n",
+ " \"\"\"\n",
+ " batch, num_key_value_heads, slen, head_dim = hidden_states.shape\n",
+ " if n_rep == 1:\n",
+ " return hidden_states\n",
+ " hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)\n",
+ " return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)\n"
+ ]
+ },
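+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`repeat_kv` serves grouped-query attention (GQA), where several query heads share one key/value head. In the MiniCPM-2B config `num_key_value_heads` equals `num_attention_heads` (plain multi-head attention, `n_rep == 1`), but a small shape demo shows what the expansion would do:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# expand 4 KV heads to serve 8 query heads (n_rep = 2)\n",
+    "kv = torch.randn(2, 4, 10, 64)  # (batch, num_key_value_heads, seqlen, head_dim)\n",
+    "expanded = repeat_kv(kv, n_rep=2)\n",
+    "print(expanded.shape)  # torch.Size([2, 8, 10, 64])\n",
+    "print(torch.equal(expanded[:, 0], expanded[:, 1]))  # True: query heads 0 and 1 share KV head 0"
+   ]
+  },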
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "### Attention"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "This code defines `MiniCPMAttention`, a module implementing multi-head attention.\n",
+    "\n",
+    "The `forward` method works as follows. The hidden states are first projected into query, key and value states, which are reshaped into per-head tensors. If a past key/value cache exists, the cached sequence length is added to the current one. Rotary embeddings are then applied to the query and key states, and the cache is updated with the new keys and values. Keys and values are repeated to match the number of key/value groups where needed. Attention weights are computed, masked, passed through softmax and dropout, and used to take a weighted sum of the value states. The attention output is reshaped back and passed through the output projection. If attention weights are not requested, they are set to `None`. The method returns the attention output, the attention weights and the key/value cache."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Compared with vanilla attention, MiniCPMAttention applies RoPE when computing the embeddings, which handles long sequences better. It also supports key/value caching, which is very useful for auto-regressive decoding and greatly speeds it up."
+ ]
+ },
+ {
+ "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+ "source": [
+ "class MiniCPMAttention(nn.Module):\n",
+ " \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n",
+ "\n",
+ " def __init__(self, config: MiniCPMConfig, layer_idx: Optional[int] = None):\n",
+ " super().__init__()\n",
+ " self.config = config\n",
+ " self.layer_idx = layer_idx\n",
+ " if layer_idx is None:\n",
+    "            logger.warning(\n",
+    "                f\"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead \"\n",
+    "                \"to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` \"\n",
+ " \"when creating this class.\"\n",
+ " )\n",
+ "\n",
+ " self.attention_dropout = config.attention_dropout # 0.0\n",
+ " self.hidden_size = config.hidden_size # 2304\n",
+ " self.num_heads = config.num_attention_heads # 36\n",
+ " self.head_dim = self.hidden_size // self.num_heads # 64\n",
+ " self.num_key_value_heads = config.num_key_value_heads # 36\n",
+ " self.num_key_value_groups = self.num_heads // self.num_key_value_heads # 1\n",
+ " self.max_position_embeddings = config.max_position_embeddings # 2048\n",
+ " self.rope_theta = config.rope_theta # 10000.0\n",
+ " self.is_causal = True\n",
+ "\n",
+ " if (self.head_dim * self.num_heads) != self.hidden_size:\n",
+ " raise ValueError(\n",
+ " f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n",
+ " f\" and `num_heads`: {self.num_heads}).\"\n",
+ " )\n",
+ "\n",
+ " self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)\n",
+ " self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)\n",
+ " self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)\n",
+ " self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)\n",
+ " self._init_rope()\n",
+ "\n",
+ " def _init_rope(self):\n",
+ " self.rotary_emb = MiniCPMRotaryEmbedding(\n",
+ " self.head_dim,\n",
+ " max_position_embeddings=self.max_position_embeddings,\n",
+ " base=self.rope_theta,\n",
+ " )\n",
+ "\n",
+ " def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n",
+ " return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n",
+ "\n",
+ " def forward(\n",
+ " self,\n",
+ " hidden_states: torch.Tensor,\n",
+ " attention_mask: Optional[torch.Tensor] = None,\n",
+ " position_ids: Optional[torch.LongTensor] = None,\n",
+ " past_key_value: Optional[Cache] = None,\n",
+ " output_attentions: bool = False,\n",
+ " use_cache: bool = False,\n",
+ " **kwargs,\n",
+ " ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n",
+ " if \"padding_mask\" in kwargs:\n",
+ " warnings.warn(\n",
+ " \"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`\"\n",
+ " )\n",
+ "\n",
+ " bsz, q_len, _ = hidden_states.size()\n",
+ "\n",
+    "        # q, k, v projections\n",
+ " query_states = self.q_proj(hidden_states)\n",
+ " key_states = self.k_proj(hidden_states)\n",
+ " value_states = self.v_proj(hidden_states)\n",
+ " \n",
+    "        # split into heads -> (bsz, num_heads, q_len, head_dim)\n",
+ " query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
+ " key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n",
+ " value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n",
+ "\n",
+ " kv_seq_len = key_states.shape[-2]\n",
+ " if past_key_value is not None:\n",
+ " if self.layer_idx is None:\n",
+ " raise ValueError(\n",
+ " f\"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} \"\n",
+ " \"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class \"\n",
+ " \"with a layer index.\"\n",
+ " )\n",
+ " kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)\n",
+ " \n",
+    "        # look up the cos/sin tables for the RoPE embedding\n",
+ " cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)\n",
+ " \n",
+    "        # apply RoPE to the query and key embeddings\n",
+ " query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)\n",
+ "\n",
+ " if past_key_value is not None:\n",
+ " cache_kwargs = {\"sin\": sin, \"cos\": cos} # Specific to RoPE models\n",
+ " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)\n",
+    "        # num_key_value_groups is 1 here (MHA), so no expansion is needed:\n",
+ " # key_states = repeat_kv(key_states, self.num_key_value_groups)\n",
+ " # value_states = repeat_kv(value_states, self.num_key_value_groups)\n",
+ "\n",
+ " attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)\n",
+ " if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):\n",
+ " raise ValueError(\n",
+ " f\"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is\"\n",
+ " f\" {attn_weights.size()}\"\n",
+ " )\n",
+ "\n",
+ " if attention_mask is not None:\n",
+ " if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):\n",
+ " raise ValueError(\n",
+ " f\"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}\"\n",
+ " )\n",
+ " attn_weights = attn_weights + attention_mask\n",
+ "\n",
+ " # upcast attention to fp32\n",
+ " attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)\n",
+ " attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)\n",
+ " attn_output = torch.matmul(attn_weights, value_states)\n",
+ "\n",
+ " if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n",
+ " raise ValueError(\n",
+ " f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n",
+ " f\" {attn_output.size()}\"\n",
+ " )\n",
+ "\n",
+ " attn_output = attn_output.transpose(1, 2).contiguous()\n",
+ "\n",
+ " attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n",
+ "\n",
+ " attn_output = self.o_proj(attn_output)\n",
+ "\n",
+ " if not output_attentions:\n",
+ " attn_weights = None\n",
+ " \n",
+ " return attn_output, attn_weights, past_key_value\n"
+ ]
+ },
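+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To exercise the attention module without a real checkpoint, we can feed it a stub configuration. The sizes below are hypothetical toy values (the released MiniCPM-2B uses hidden_size=2304 with 36 heads); no causal mask is passed, so this is purely a shape test:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from types import SimpleNamespace\n",
+    "\n",
+    "# hypothetical toy config carrying only the fields MiniCPMAttention reads\n",
+    "toy_cfg = SimpleNamespace(\n",
+    "    attention_dropout=0.0, hidden_size=64, num_attention_heads=4,\n",
+    "    num_key_value_heads=4, max_position_embeddings=128, rope_theta=10000.0,\n",
+    "    attention_bias=False,\n",
+    ")\n",
+    "attn = MiniCPMAttention(toy_cfg, layer_idx=0)\n",
+    "x = torch.randn(2, 5, 64)  # (bsz, q_len, hidden_size)\n",
+    "pos = torch.arange(5).unsqueeze(0).expand(2, -1)  # position ids\n",
+    "out, weights, _ = attn(x, position_ids=pos, output_attentions=True)\n",
+    "print(out.shape, weights.shape)  # torch.Size([2, 5, 64]) torch.Size([2, 4, 5, 5])"
+   ]
+  },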
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### RMSNorm"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "`rms_layernorm` implements RMSNorm, a simplified variant of Layer Normalization that skips mean-centering and normalizes by the root mean square of the activations. It keeps the inputs in a stable range so the network trains more stably.\n",
+    "\n",
+    "$$ y = W \\times \\left(\\frac{H}{\\sqrt{mean(H^2) + \\epsilon}}\\right) $$\n",
+    "\n",
+    "The layer first computes the mean of the squared inputs, then divides the inputs by the square root of that mean (plus a small constant to avoid division by zero), keeping every element in a relatively stable range. The result is then multiplied by a learnable weight parameter.\n",
+    "\n",
+    "This normalization strategy helps reduce internal covariate shift during training, lowers the model's sensitivity to initialization, and speeds up training."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# @torch.jit.script # type: ignore\n",
+ "def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):\n",
+ " old_dtype = hidden.dtype\n",
+ " variance = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)\n",
+ " hidden = (hidden * torch.rsqrt(variance + eps)).to(old_dtype)\n",
+ " return hidden * weight"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Wrap the `rms_layernorm` function in an `nn.Module`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class MiniCPMRMSNorm(nn.Module):\n",
+ " def __init__(self, hidden_size, eps=1e-6):\n",
+ " super().__init__()\n",
+ " self.weight = nn.Parameter(torch.ones(hidden_size))\n",
+ " self.variance_epsilon = eps\n",
+ "\n",
+ " def forward(self, hidden_states):\n",
+ " return rms_layernorm(hidden_states, self.weight, self.variance_epsilon)"
+ ]
+ },
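+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick check that the module matches a hand-written root-mean-square normalization (the weight is initialized to ones, so it does not change the result here):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "norm = MiniCPMRMSNorm(hidden_size=8, eps=1e-6)\n",
+    "x = torch.randn(2, 3, 8)\n",
+    "manual = x / torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True) + 1e-6) * norm.weight\n",
+    "print(torch.allclose(norm(x), manual, atol=1e-6))  # True\n",
+    "print(norm(x).pow(2).mean(dim=-1))  # roughly 1.0 everywhere after normalization"
+   ]
+  },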
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "### MLP with SwiGLU"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "MiniCPM's MLP (feed-forward block) uses a SwiGLU activation. It consists of three linear layers, gate_proj, up_proj and down_proj, plus a SiLU activation. The output of gate_proj is passed through SiLU and acts as a gate on the activations of up_proj, extracting and transforming the features of the input x; down_proj then maps the gated features back to the original dimension, completing one forward pass. This design lets the model extract and transform features effectively while keeping the output dimension unchanged."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "The SiLU (Sigmoid Linear Unit) activation is the nonlinear function\n",
+    "\n",
+    "$$ f(x) = x \\cdot \\sigma(x) $$\n",
+    "\n",
+    "For negative inputs the output is close to 0, while for large positive inputs the output is close to the input itself. SiLU is therefore unbounded above, bounded below, smooth and non-monotonic. Across many deep-learning applications it has been shown to outperform ReLU and other activations: it keeps ReLU's strengths (for example, mitigating vanishing gradients) while avoiding some of its weaknesses (ReLU has zero gradient on negative inputs and is not zero-centered). Moreover, SiLU is smooth, so it is differentiable over its whole domain, which is very helpful for optimization."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class MiniCPMMLP(nn.Module):\n",
+ " def __init__(self, config):\n",
+ " super().__init__()\n",
+ " self.config = config\n",
+ " self.hidden_size = config.hidden_size # 2304\n",
+ " self.intermediate_size = config.intermediate_size # 5760\n",
+ " self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n",
+ " self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n",
+ " self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n",
+ " self.act_fn = nn.SiLU()\n",
+ "\n",
+ " def forward(self, x): \n",
+ " down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n",
+ " return down_proj"
+ ]
+ },
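+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A shape walk-through of the gated MLP with hypothetical toy sizes (the released model uses hidden_size=2304 and intermediate_size=5760, the same 2.5x expansion ratio):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from types import SimpleNamespace\n",
+    "\n",
+    "mlp = MiniCPMMLP(SimpleNamespace(hidden_size=64, intermediate_size=160))\n",
+    "x = torch.randn(2, 5, 64)\n",
+    "gate = mlp.act_fn(mlp.gate_proj(x))  # SiLU-activated gate, (2, 5, 160)\n",
+    "up = mlp.up_proj(x)                  # (2, 5, 160)\n",
+    "print(mlp.down_proj(gate * up).shape)  # gated features mapped back: torch.Size([2, 5, 64])"
+   ]
+  },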
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### DecoderLayer"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "We now build the decoder layer of the MiniCPM model, `MiniCPMDecoderLayer`.\n",
+    "\n",
+    "It reuses the components built above: `MiniCPMAttention` for the attention computation, `MiniCPMMLP` for the feed-forward computation, and `MiniCPMRMSNorm` for layer normalization (both the input norm and the post-attention norm).\n",
+    "\n",
+    "The layer follows the standard decoder-layer design. The input hidden states are first normalized and passed through self-attention; the result is added back to the original hidden states via a scaled residual connection. The sum is normalized again, passed through the MLP, and added back through a second scaled residual connection."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "class MiniCPMDecoderLayer(nn.Module):\n",
+ " def __init__(self, config: MiniCPMConfig, layer_idx: int):\n",
+ " super().__init__()\n",
+ " self.hidden_size = config.hidden_size\n",
+ " self.self_attn = MiniCPMAttention(config=config, layer_idx=layer_idx)\n",
+ "\n",
+ " self.mlp = MiniCPMMLP(config)\n",
+ " self.input_layernorm = MiniCPMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n",
+ " self.post_attention_layernorm = MiniCPMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n",
+ "\n",
+ " self.scale_depth = config.scale_depth\n",
+ " self.num_hidden_layers = config.num_hidden_layers\n",
+ "\n",
+ " def forward(\n",
+ " self,\n",
+ " hidden_states: torch.Tensor,\n",
+ " attention_mask: Optional[torch.Tensor] = None,\n",
+ " position_ids: Optional[torch.LongTensor] = None,\n",
+ " past_key_value: Optional[Tuple[torch.Tensor]] = None,\n",
+ " output_attentions: Optional[bool] = False,\n",
+ " use_cache: Optional[bool] = False,\n",
+ " **kwargs,\n",
+ " ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n",
+ " \n",
+ " residual = hidden_states\n",
+ " hidden_states = self.input_layernorm(hidden_states)\n",
+ " # Self Attention\n",
+ " hidden_states, self_attn_weights, present_key_value = self.self_attn(\n",
+ " hidden_states=hidden_states,\n",
+ " attention_mask=attention_mask,\n",
+ " position_ids=position_ids,\n",
+ " past_key_value=past_key_value,\n",
+ " output_attentions=output_attentions,\n",
+ " use_cache=use_cache,\n",
+ " **kwargs,\n",
+ " )\n",
+ " \n",
+ " hidden_states = residual + hidden_states * (self.scale_depth / math.sqrt(self.num_hidden_layers))\n",
+ "\n",
+ " # Fully Connected\n",
+ " residual = hidden_states\n",
+ " hidden_states = self.post_attention_layernorm(hidden_states)\n",
+ "\n",
+ " hidden_states = self.mlp(hidden_states)\n",
+ " hidden_states = residual + hidden_states * (self.scale_depth / math.sqrt(self.num_hidden_layers))\n",
+ "\n",
+ " outputs = (hidden_states,)\n",
+ "\n",
+ " if output_attentions:\n",
+ " outputs += (self_attn_weights,)\n",
+ "\n",
+ " if use_cache:\n",
+ " outputs += (present_key_value,)\n",
+ "\n",
+ " return outputs\n"
+ ]
+ },
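+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note the residual scaling factor `scale_depth / sqrt(num_hidden_layers)`. With the released config (`scale_depth = 1.4`, `num_hidden_layers = 40`), each residual branch is multiplied by $1.4/\\sqrt{40} \\approx 0.221$, which damps the per-layer contribution and stabilizes training of the deep stack."
+   ]
+  },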
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "class BaseModelOutputWithPast(OrderedDict):\n",
+    "    \"\"\"Minimal stand-in for transformers' BaseModelOutputWithPast: keys (last_hidden_state, past_key_values, hidden_states, attentions) are readable as attributes; integers index values positionally.\"\"\"\n",
+    "    def __getattr__(self, name):\n",
+    "        try:\n",
+    "            return self[name]\n",
+    "        except KeyError:\n",
+    "            raise AttributeError(name)\n",
+    "    def __getitem__(self, key):\n",
+    "        return list(self.values())[key] if isinstance(key, int) else super().__getitem__(key)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "\n",
+ "def _prepare_4d_causal_attention_mask(\n",
+ " attention_mask: Optional[torch.Tensor],\n",
+ " input_shape: Union[torch.Size, Tuple, List],\n",
+ " inputs_embeds: torch.Tensor,\n",
+ " past_key_values_length: int,\n",
+ " sliding_window: Optional[int] = None,\n",
+ "):\n",
+ " \"\"\"\n",
+ " Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n",
+ " `(batch_size, key_value_length)`\n",
+ "\n",
+ " Args:\n",
+ " attention_mask (`torch.Tensor` or `None`):\n",
+ " A 2D attention mask of shape `(batch_size, key_value_length)`\n",
+ " input_shape (`tuple(int)` or `list(int)` or `torch.Size`):\n",
+ " The input shape should be a tuple that defines `(batch_size, query_length)`.\n",
+ " inputs_embeds (`torch.Tensor`):\n",
+ " The embedded inputs as a torch Tensor.\n",
+ " past_key_values_length (`int`):\n",
+ " The length of the key value cache.\n",
+ " sliding_window (`int`, *optional*):\n",
+ " If the model uses windowed attention, a sliding window should be passed.\n",
+ " \"\"\"\n",
+ " attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)\n",
+ "\n",
+ " key_value_length = input_shape[-1] + past_key_values_length\n",
+ "\n",
+ " # 4d mask is passed through the layers\n",
+ " if attention_mask is not None and len(attention_mask.shape) == 2:\n",
+ " attention_mask = attn_mask_converter.to_4d(\n",
+ " attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype\n",
+ " )\n",
+ " elif attention_mask is not None and len(attention_mask.shape) == 4:\n",
+ " expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)\n",
+ " if tuple(attention_mask.shape) != expected_shape:\n",
+ " raise ValueError(\n",
+ " f\"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}.\"\n",
+ " )\n",
+ " else:\n",
+ " # if the 4D mask has correct shape - invert it and fill with negative infinity\n",
+ " inverted_mask = 1.0 - attention_mask\n",
+ " attention_mask = inverted_mask.masked_fill(\n",
+ " inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min\n",
+ " )\n",
+ " else:\n",
+ " attention_mask = attn_mask_converter.to_causal_4d(\n",
+ " input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device\n",
+ " )\n",
+ "\n",
+ " return attention_mask\n"
+ ]
+ },
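+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A small demo of the mask helper (it relies on `AttentionMaskConverter` from `transformers.modeling_attn_mask_utils`, imported at the top): a 2D padding mask is expanded into a 4D causal mask whose blocked entries hold the dtype's most negative value, so they vanish after softmax."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# batch of 1, seq_len 4, last token padded out\n",
+    "pad_mask = torch.tensor([[1, 1, 1, 0]])\n",
+    "embeds = torch.zeros(1, 4, 8)\n",
+    "mask4d = _prepare_4d_causal_attention_mask(pad_mask, (1, 4), embeds, past_key_values_length=0)\n",
+    "print(mask4d.shape)  # torch.Size([1, 1, 4, 4])\n",
+    "print((mask4d == 0).int()[0, 0])  # 1 where attention is allowed: lower triangle minus the padded column"
+   ]
+  },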
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "In the `forward` method we first handle the inputs: the token ids `input_ids`, the `attention_mask`, the `position_ids`, the cached key/value pairs `past_key_values`, the token embeddings `inputs_embeds`, and several control flags: `use_cache`, `output_attentions`, `output_hidden_states` and `return_dict`.\n",
+    "\n",
+    "We then check that `input_ids` and `inputs_embeds` are not specified at the same time. If `position_ids` are not given, we generate position ids running from 0 to the sequence length (offset by the cache length), and `inputs_embeds` are computed from `input_ids` if needed.\n",
+    "\n",
+    "Next we build the 4D causal attention mask and assign `inputs_embeds` to `hidden_states`.\n",
+    "\n",
+    "The hidden states are then pushed through the decoder `layers`; depending on `output_hidden_states` and `output_attentions`, all hidden states and self-attention maps are collected into `all_hidden_states` and `all_self_attns`.\n",
+    "\n",
+    "Finally, the last hidden states are normalized, and `use_cache` and `return_dict` decide what gets returned."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "First, the batch size and sequence length are computed from `input_ids` or `inputs_embeds`.\n",
+    "If caching is enabled (typically to speed up successive decoding steps), the past key/value pairs are converted and their usable length is accounted for.\n",
+    "If no `position_ids` are provided, default position indices are generated.\n",
+    "The embedding layer turns the token indices into vector representations.\n",
+    "The embeddings are then processed by the stack of decoder layers, each of which may update the hidden states, attention weights and cache.\n",
+    "Finally, the output of the last layer is normalized."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class MiniCPMModel(nn.Module):\n",
+ "\n",
+ " def __init__(self, config: MiniCPMConfig):\n",
+    "        super().__init__()\n",
+    "        self.config = config\n",
+ " self.padding_idx = config.pad_token_id\n",
+ " self.vocab_size = config.vocab_size\n",
+ "\n",
+ " self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)\n",
+ " self.layers = nn.ModuleList(\n",
+ " [MiniCPMDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]\n",
+ " )\n",
+ "\n",
+ " self.norm = MiniCPMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n",
+ "\n",
+ " self.gradient_checkpointing = False\n",
+    "        self.apply(self._init_weights)\n",
+ " \n",
+ " def _init_weights(self, module):\n",
+ " std = self.config.initializer_range\n",
+ " if isinstance(module, nn.Linear):\n",
+ " module.weight.data.normal_(mean=0.0, std=std)\n",
+ " if module.bias is not None:\n",
+ " module.bias.data.zero_()\n",
+ " elif isinstance(module, nn.Embedding):\n",
+ " module.weight.data.normal_(mean=0.0, std=std)\n",
+ " if module.padding_idx is not None:\n",
+ " module.weight.data[module.padding_idx].zero_()\n",
+ "\n",
+ " def get_input_embeddings(self):\n",
+ " return self.embed_tokens\n",
+ "\n",
+ " def set_input_embeddings(self, value):\n",
+ " self.embed_tokens = value\n",
+ "\n",
+ " def forward(\n",
+ " self,\n",
+ " input_ids: torch.LongTensor = None,\n",
+ " attention_mask: Optional[torch.Tensor] = None,\n",
+ " position_ids: Optional[torch.LongTensor] = None,\n",
+ " past_key_values: Optional[List[torch.FloatTensor]] = None,\n",
+ " inputs_embeds: Optional[torch.FloatTensor] = None,\n",
+ " use_cache: Optional[bool] = None,\n",
+ " output_attentions: Optional[bool] = None,\n",
+ " output_hidden_states: Optional[bool] = None,\n",
+ " return_dict: Optional[bool] = None,\n",
+ " ) -> Union[Tuple, BaseModelOutputWithPast]:\n",
+ " output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n",
+ " output_hidden_states = (\n",
+ " output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n",
+ " )\n",
+ " use_cache = use_cache if use_cache is not None else self.config.use_cache\n",
+ "\n",
+ " return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n",
+ "\n",
+ " # retrieve input_ids and inputs_embeds\n",
+ " if input_ids is not None and inputs_embeds is not None:\n",
+ " raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n",
+ " elif input_ids is not None:\n",
+ " batch_size, seq_length = input_ids.shape[:2]\n",
+ " elif inputs_embeds is not None:\n",
+ " batch_size, seq_length = inputs_embeds.shape[:2]\n",
+ " else:\n",
+ " raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n",
+ "\n",
+ " past_key_values_length = 0\n",
+ " \n",
+ " if use_cache:\n",
+ " use_legacy_cache = not isinstance(past_key_values, Cache)\n",
+ " if use_legacy_cache:\n",
+ " past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n",
+ " past_key_values_length = past_key_values.get_usable_length(seq_length)\n",
+ "\n",
+ " if position_ids is None:\n",
+ " device = input_ids.device if input_ids is not None else inputs_embeds.device\n",
+ " position_ids = torch.arange(\n",
+ " past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device\n",
+ " )\n",
+ " position_ids = position_ids.unsqueeze(0)\n",
+ "\n",
+ " if inputs_embeds is None:\n",
+ " inputs_embeds = self.embed_tokens(input_ids) * self.config.scale_emb\n",
+ "\n",
+ " # 4d mask is passed through the layers\n",
+ " attention_mask = _prepare_4d_causal_attention_mask(\n",
+ " attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length\n",
+ " )\n",
+ "\n",
+ " # embed positions\n",
+ " hidden_states = inputs_embeds\n",
+ "\n",
+ " # decoder layers\n",
+ " all_hidden_states = () if output_hidden_states else None\n",
+ " all_self_attns = () if output_attentions else None\n",
+ " next_decoder_cache = None\n",
+ "\n",
+ " for decoder_layer in self.layers:\n",
+ " if output_hidden_states:\n",
+ " all_hidden_states += (hidden_states,)\n",
+ "\n",
+ " layer_outputs = decoder_layer(\n",
+ " hidden_states,\n",
+ " attention_mask=attention_mask,\n",
+ " position_ids=position_ids,\n",
+ " past_key_value=past_key_values,\n",
+ " output_attentions=output_attentions,\n",
+ " use_cache=use_cache,\n",
+ " )\n",
+ "\n",
+ " hidden_states = layer_outputs[0]\n",
+ "\n",
+ " if use_cache:\n",
+ " next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n",
+ "\n",
+ " if output_attentions:\n",
+ " all_self_attns += (layer_outputs[1],)\n",
+ "\n",
+ " hidden_states = self.norm(hidden_states)\n",
+ "\n",
+ " # add hidden states from the last decoder layer\n",
+ " if output_hidden_states:\n",
+ " all_hidden_states += (hidden_states,)\n",
+ "\n",
+    "        next_cache = None\n",
+    "        if use_cache:\n",
+    "            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache\n",
+ " if not return_dict:\n",
+ " return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)\n",
+ " return BaseModelOutputWithPast(\n",
+ " last_hidden_state=hidden_states,\n",
+ " past_key_values=next_cache,\n",
+ " hidden_states=all_hidden_states,\n",
+ " attentions=all_self_attns,\n",
+ " )"
+ ]
+ },
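+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "With all the pieces in place we can run a tiny, randomly initialized model end to end. This is a smoke test with hypothetical toy hyperparameters, not the released 2.4B settings; it assumes `MiniCPMConfig` and the `transformers` mask utilities imported at the top are available. Note the `scale_emb` trick: token embeddings are multiplied by `scale_emb` (12 in the released config) on their way into the network."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# smoke test with a hypothetical toy config (the released model uses\n",
+    "# hidden_size=2304, 40 layers, 36 heads, vocab_size=122753)\n",
+    "tiny = MiniCPMConfig(vocab_size=100, hidden_size=64, intermediate_size=160,\n",
+    "                     num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=4,\n",
+    "                     scale_emb=12, dim_model_base=16, scale_depth=1.4)\n",
+    "model = MiniCPMModel(tiny)\n",
+    "ids = torch.randint(0, 100, (1, 6))\n",
+    "out = model(input_ids=ids, use_cache=False)\n",
+    "print(out.last_hidden_state.shape)  # torch.Size([1, 6, 64])"
+   ]
+  },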
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### CausalLM"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "We define a `CausalLMOutputWithPast` class, used as the output container of the causal (auto-regressive) language model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "class CausalLMOutputWithPast(OrderedDict):\n",
+    "    \"\"\"Minimal stand-in for transformers' CausalLMOutputWithPast: keys (loss, logits, past_key_values, hidden_states, attentions) are readable as attributes; integers index values positionally.\"\"\"\n",
+    "    def __getattr__(self, name):\n",
+    "        try:\n",
+    "            return self[name]\n",
+    "        except KeyError:\n",
+    "            raise AttributeError(name)\n",
+    "    def __getitem__(self, key):\n",
+    "        return list(self.values())[key] if isinstance(key, int) else super().__getitem__(key)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class MiniCPMForCausalLM(nn.Module):\n",
+ " _tied_weights_keys = [\"lm_head.weight\"]\n",
+ "\n",
+ " def __init__(self, config):\n",
+    "        super().__init__()\n",
+    "        self.config = config\n",
+    "        self.model = MiniCPMModel(config)\n",
+    "        self.vocab_size = config.vocab_size\n",
+    "        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n",
+    "\n",
+    "        # initialize weights (plain nn.Module has no post_init(); reuse the model's init scheme)\n",
+    "        self.apply(self.model._init_weights)\n",
+ "\n",
+ " def get_input_embeddings(self):\n",
+ " return self.model.embed_tokens\n",
+ "\n",
+ " def set_input_embeddings(self, value):\n",
+ " self.model.embed_tokens = value\n",
+ "\n",
+ " def get_output_embeddings(self):\n",
+ " return self.lm_head\n",
+ "\n",
+ " def set_output_embeddings(self, new_embeddings):\n",
+ " self.lm_head = new_embeddings\n",
+ "\n",
+ " def set_decoder(self, decoder):\n",
+ " self.model = decoder\n",
+ "\n",
+ " def get_decoder(self):\n",
+ " return self.model\n",
+ "\n",
+ " def forward(\n",
+ " self,\n",
+ " input_ids: torch.LongTensor = None,\n",
+ " attention_mask: Optional[torch.Tensor] = None,\n",
+ " position_ids: Optional[torch.LongTensor] = None,\n",
+ " past_key_values: Optional[List[torch.FloatTensor]] = None,\n",
+ " inputs_embeds: Optional[torch.FloatTensor] = None,\n",
+ " labels: Optional[torch.LongTensor] = None,\n",
+ " use_cache: Optional[bool] = None,\n",
+ " output_attentions: Optional[bool] = None,\n",
+ " output_hidden_states: Optional[bool] = None,\n",
+ " return_dict: Optional[bool] = None,\n",
+ " ) -> Union[Tuple, CausalLMOutputWithPast]:\n",
+ "\n",
+ " output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n",
+ " output_hidden_states = (\n",
+ " output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n",
+ " )\n",
+ " return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n",
+ "\n",
+ " # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n",
+ " outputs = self.model(\n",
+ " input_ids=input_ids,\n",
+ " attention_mask=attention_mask,\n",
+ " position_ids=position_ids,\n",
+ " past_key_values=past_key_values,\n",
+ " inputs_embeds=inputs_embeds,\n",
+ " use_cache=use_cache,\n",
+ " output_attentions=output_attentions,\n",
+ " output_hidden_states=output_hidden_states,\n",
+ " return_dict=return_dict,\n",
+ " )\n",
+ "\n",
+ " hidden_states = outputs[0]\n",
+ " logits = self.lm_head(hidden_states / (self.config.hidden_size / self.config.dim_model_base))\n",
+ " logits = logits.float()\n",
+ "\n",
+ " loss = None\n",
+ " if labels is not None:\n",
+ " # Shift so that tokens < n predict n\n",
+ " shift_logits = logits[..., :-1, :].contiguous()\n",
+ " shift_labels = labels[..., 1:].contiguous()\n",
+ " # Flatten the tokens\n",
+ " loss_fct = CrossEntropyLoss()\n",
+ " shift_logits = shift_logits.view(-1, self.config.vocab_size)\n",
+ " shift_labels = shift_labels.view(-1)\n",
+ " # Enable model parallelism\n",
+ " shift_labels = shift_labels.to(shift_logits.device)\n",
+ " loss = loss_fct(shift_logits, shift_labels)\n",
+ "\n",
+ " if not return_dict:\n",
+ " output = (logits,) + outputs[1:]\n",
+ " return (loss,) + output if loss is not None else output\n",
+ "\n",
+ " return CausalLMOutputWithPast(\n",
+ " loss=loss,\n",
+ " logits=logits,\n",
+ " past_key_values=outputs.past_key_values,\n",
+ " hidden_states=outputs.hidden_states,\n",
+ " attentions=outputs.attentions,\n",
+ " )\n",
+ "\n",
+ " def prepare_inputs_for_generation(\n",
+ " self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n",
+ " ):\n",
+ " if past_key_values is not None:\n",
+ " if isinstance(past_key_values, Cache):\n",
+ " cache_length = past_key_values.get_seq_length()\n",
+ " past_length = past_key_values.seen_tokens\n",
+ " max_cache_length = past_key_values.get_max_length()\n",
+ " else:\n",
+ " cache_length = past_length = past_key_values[0][0].shape[2]\n",
+ " max_cache_length = None\n",
+ "\n",
+ " # Keep only the unprocessed tokens:\n",
+ " # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where\n",
+ " # some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as\n",
+ " # input)\n",
+ " if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:\n",
+ " input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]\n",
+ " # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard\n",
+ " # input_ids based on the past_length.\n",
+ " elif past_length < input_ids.shape[1]:\n",
+ " input_ids = input_ids[:, past_length:]\n",
+ " # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.\n",
+ "\n",
+ " # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.\n",
+ " if (\n",
+ " max_cache_length is not None\n",
+ " and attention_mask is not None\n",
+ " and cache_length + input_ids.shape[1] > max_cache_length\n",
+ " ):\n",
+ " attention_mask = attention_mask[:, -max_cache_length:]\n",
+ "\n",
+ " position_ids = kwargs.get(\"position_ids\", None)\n",
+ " if attention_mask is not None and position_ids is None:\n",
+ " # create position_ids on the fly for batch generation\n",
+ " position_ids = attention_mask.long().cumsum(-1) - 1\n",
+ " position_ids.masked_fill_(attention_mask == 0, 1)\n",
+ " if past_key_values:\n",
+ " position_ids = position_ids[:, -input_ids.shape[1] :]\n",
+ "\n",
+ " # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n",
+ " if inputs_embeds is not None and past_key_values is None:\n",
+ " model_inputs = {\"inputs_embeds\": inputs_embeds}\n",
+ " else:\n",
+ " model_inputs = {\"input_ids\": input_ids}\n",
+ "\n",
+ " model_inputs.update(\n",
+ " {\n",
+ " \"position_ids\": position_ids,\n",
+ " \"past_key_values\": past_key_values,\n",
+ " \"use_cache\": kwargs.get(\"use_cache\"),\n",
+ " \"attention_mask\": attention_mask,\n",
+ " }\n",
+ " )\n",
+ " return model_inputs\n",
+ "\n",
+ " @staticmethod\n",
+ " def _reorder_cache(past_key_values, beam_idx):\n",
+ " reordered_past = ()\n",
+ " for layer_past in past_key_values:\n",
+ " reordered_past += (\n",
+ " tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n",
+ " )\n",
+ " return reordered_past\n",
+ " \n",
+ " @torch.inference_mode()\n",
+ " def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = \"user\",\n",
+ " max_length: int = 4096, num_beams=1, do_sample=True, top_p=0.8, temperature=0.3, logits_processor=None,\n",
+ " **kwargs):\n",
+ " if history is None:\n",
+ " history = []\n",
+    "        gen_kwargs = {\"max_length\": max_length, \"num_beams\": num_beams, \"do_sample\": do_sample, \"top_p\": top_p,\n",
+    "                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n",
+ " \n",
+ " history.append({\"role\": role, \"content\": query})\n",
+ " history_str = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=False)\n",
+    "        inputs = tokenizer(history_str, return_tensors='pt').to(next(self.parameters()).device)  # plain nn.Module has no .device attribute\n",
+    "        # note: this call assumes the Hugging Face `generate` interface; the simplified\n",
+    "        # `generate` below only supports (idx, max_new_tokens, temperature, top_k)\n",
+    "        outputs = self.generate(**inputs, **gen_kwargs)\n",
+ " outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n",
+ " response = tokenizer.decode(outputs)\n",
+    "        pattern = re.compile(r\".*?(?=<AI>|<用户>)\", re.DOTALL)  # stop at the next role marker\n",
+ " matches = pattern.findall(response)\n",
+ " if len(matches) > 0:\n",
+ " response = matches[0]\n",
+ " history.append({\"role\": \"assistant\", \"content\": response})\n",
+ " return response, history\n",
+ " \n",
+    "    @torch.no_grad()\n",
+    "    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):\n",
+    "        # Simplified sampling loop for inference. idx has shape (batch_size, sequence_length);\n",
+    "        # we decode max_new_tokens steps, one token at a time.\n",
+    "        for _ in range(max_new_tokens):\n",
+    "            # crop the context to the maximum length the model supports\n",
+    "            max_len = self.config.max_position_embeddings\n",
+    "            idx_cond = idx if idx.size(1) <= max_len else idx[:, -max_len:]\n",
+    "            # forward pass: logits has shape (batch_size, sequence_length, vocab_size)\n",
+    "            logits = self(idx_cond).logits\n",
+    "            # keep only the last token's logits and scale by the temperature to control diversity\n",
+    "            logits = logits[:, -1, :] / temperature\n",
+    "            # with top-k sampling, mask out everything below the k-th largest logit\n",
+    "            if top_k is not None:\n",
+    "                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n",
+    "                logits[logits < v[:, [-1]]] = -float('Inf')\n",
+    "            # softmax over the vocabulary\n",
+    "            probs = F.softmax(logits, dim=-1)\n",
+    "            # sample the next token from the distribution\n",
+    "            idx_next = torch.multinomial(probs, num_samples=1)\n",
+    "            # append the sampled token and feed it back in as input for the next step\n",
+    "            idx = torch.cat((idx, idx_next), dim=1)\n",
+    "\n",
+    "        return idx\n"
+ ]
+ }
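+  ,
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, a smoke test of the full causal LM with the same hypothetical toy config as above. Note the second width trick in `forward`: hidden states are divided by `hidden_size / dim_model_base` before the LM head (2304 / 256 = 9 in the released config, 64 / 16 = 4 here)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "lm = MiniCPMForCausalLM(tiny)\n",
+    "ids = torch.randint(0, 100, (1, 8))\n",
+    "out = lm(input_ids=ids, labels=ids, use_cache=False)\n",
+    "print(out.logits.shape, out.loss.item())  # torch.Size([1, 8, 100]); loss near ln(100) ~ 4.6 at init\n",
+    "gen = lm.generate(ids, max_new_tokens=4, top_k=5)\n",
+    "print(gen.shape)  # torch.Size([1, 12])"
+   ]
+  }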
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "aiLLM",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/Model_Architecture_Discussions/MiniCPM/README.md b/Model_Architecture_Discussions/MiniCPM/README.md
new file mode 100644
index 0000000..cce4daf
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/README.md
@@ -0,0 +1,151 @@
+---
+language:
+- en
+- zh
+tags:
+- MiniCPM
+- ModelBest
+- THUNLP
+---
+
+# MiniCPM
+
+[MiniCPM Technical Report](https://shengdinghu.notion.site/MiniCPM-c805a17c5c8046398914e47f0542095a?pvs=4) | OmniLMM Multi-modal Model | CPM-C ~100B Model Trial
+
+MiniCPM is an end-side LLM jointly open-sourced by ModelBest Inc. and TsinghuaNLP, with only 2.4B parameters excluding embeddings.
+
+- After SFT, MiniCPM performs on par with Mistral-7B on open general benchmarks, with better ability in Chinese, mathematics and coding. Its overall performance exceeds Llama2-13B, MPT-30B, Falcon-40B, etc.
+- After DPO, MiniCPM outperforms Llama2-70B-Chat, Vicuna-33B, Mistral-7B-Instruct-v0.1, Zephyr-7B-alpha, etc. on MTBench, the benchmark closest to real user experience.
+- MiniCPM-V, a multi-modal model built on MiniCPM-2B, achieves the best overall performance among multimodal models of the same scale, surpassing existing multimodal large models built on Phi-2 and matching or even exceeding the 9.6B Qwen-VL-Chat on some benchmarks.
+- After Int4 quantization, MiniCPM can be deployed and run on smartphones, with streaming output slightly faster than human speech. MiniCPM-V is also the first multi-modal model deployed on smartphones.
+- Development on top of MiniCPM is cheap: parameter-efficient finetuning runs on a single 1080/2080 GPU, and full-parameter finetuning on a single 3090/4090; one machine can train MiniCPM continuously.
+
+We release all model parameters for academic research and limited commercial use, as well as all checkpoints during training and most public training data for research on model mechanisms.
+
+- SFT and DPO versions based on MiniCPM-2B and human preference data: **MiniCPM-2B-SFT/DPO**
+- The multi-modal model **MiniCPM-V** based on MiniCPM-2B, which outperforms models of similar size such as those built on Phi-2
+- The Int4 quantized version **MiniCPM-2B-SFT/DPO-Int4** based on MiniCPM-2B-SFT/DPO
+- Mobile phone applications based on MLC-LLM and LLMFarm; both the language model and the multi-modal model can run inference on smartphones
+
+### Evaluation Results
+
+Detailed evaluation results are in the [github repo](https://github.com/OpenBMB/MiniCPM/blob/main/README-en.md#evaluation-results).
+
+Notice: we found that generation quality with Huggingface is slightly lower than with vLLM, so benchmarking with vLLM is recommended. We are investigating the cause.
+
+### Limitations
+
+- Due to limitations in model size, the model may suffer from hallucination. Since the DPO model tends to generate longer responses, hallucinations are more likely there. We will continue to iterate on and improve the MiniCPM models.
+- To keep the model general-purpose for academic research, we did not perform any identity training on it. Since the ShareGPT open-source corpus was used as part of the training data, the model may output identity information similar to that of the GPT series models.
+- Due to the limited model size, the model's output is strongly influenced by the prompt, so repeated attempts may yield inconsistent results.
+- Due to limited model capacity, the model's knowledge recall is not fully accurate. In the future we will combine it with RAG methods to enhance its knowledge memory.
+
+## Download
+
+ | HuggingFace | ModelScope | WiseModel |
+ |-------------|------------|-----------|
+ |[sft-bf16](https://huggingface.co/openbmb/MiniCPM-2B-sft-bf16)|[sft-bf16](https://modelscope.cn/models/OpenBMB/miniCPM-bf16)|[sft-bf16](https://wisemodel.cn/models/OpenBMB/miniCPM-bf16)
+ |[sft-fp32](https://huggingface.co/openbmb/MiniCPM-2B-sft-fp32)|[sft-fp32](https://modelscope.cn/models/OpenBMB/MiniCPM-2B-sft-fp32)|[sft-fp32](https://wisemodel.cn/models/OpenBMB/miniCPM-dpo-fp32)
+ |[dpo-bf16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16)|[dpo-bf16](https://modelscope.cn/models/OpenBMB/MiniCPM-2B-dpo-bf16/summary)|[dpo-bf16](https://wisemodel.cn/models/OpenBMB/MiniCPM-2B-dpo-bf16)
+ |[dpo-fp16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-fp16)|[dpo-fp16](https://modelscope.cn/models/OpenBMB/MiniCPM-2B-dpo-fp16/)|[dpo-fp16](https://wisemodel.cn/models/OpenBMB/MiniCPM-2B-dpo-fp16)
+ |[dpo-fp32](https://huggingface.co/openbmb/MiniCPM-2B-dpo-fp32)|[dpo-fp32](https://modelscope.cn/models/OpenBMB/MiniCPM-2B-dpo-fp32)|[dpo-fp32](https://wisemodel.cn/models/OpenBMB/miniCPM-dpo-fp32)
+
+## Usage
+
+* Install `transformers>=4.36.0` and `accelerate`, then run the following code.
+* Note: the model's data type must be specified explicitly in `from_pretrained`, otherwise large numerical errors may occur.
+```python
+from modelscope import AutoModelForCausalLM, AutoTokenizer
+import torch
+torch.manual_seed(0)
+
+path = 'OpenBMB/MiniCPM-2B-dpo-bf16'
+tokenizer = AutoTokenizer.from_pretrained(path)
+model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16, device_map='cuda', trust_remote_code=True)
+
+responds, history = model.chat(tokenizer, "山东省最高的山是哪座山, 它比黄山高还是矮?差距多少?", temperature=0.8, top_p=0.8)
+print(responds)
+```
+
+* Expected Output
+```shell
+山东省最高的山是泰山,海拔1545米。
+
+相对于黄山(海拔1864米),泰山海拔较低,相差约319米。
+```
+
+## LICENSE
+
+#### Model LICENSE
+
+* The code in this repository is released under the [Apache-2.0](https://github.com/OpenBMB/MiniCPM/blob/main/LICENSE) License.
+* The usage of MiniCPM model weights must strictly follow [the General Model License (GML)](https://github.com/OpenBMB/General-Model-License/blob/main/%E9%80%9A%E7%94%A8%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE-%E6%9D%A5%E6%BA%90%E8%AF%B4%E6%98%8E-%E5%AE%A3%E4%BC%A0%E9%99%90%E5%88%B6-%E5%95%86%E4%B8%9A%E6%8E%88%E6%9D%83.md).
+* The models and weights of MiniCPM are completely free for academic research.
+* If you intend to use the model for commercial purposes, please contact cpm@modelbest.cn to obtain written authorization; free commercial use is also permitted after registration.
+
+#### Statement
+
+* As a language model, MiniCPM generates content by learning from a vast amount of text, but it cannot comprehend or express personal opinions or value judgments. Any content it generates does not represent the viewpoints or positions of the model developers.
+* Therefore, when using content generated by MiniCPM, users should take full responsibility for evaluating and verifying it on their own.
+* We assume no responsibility for any problems arising from the use of the MiniCPM open-source model, including but not limited to data security issues, public opinion risks, or any risks and problems caused by the model being misled, misused, disseminated or improperly exploited.
+
+
+
+## Citation
+
+* Please cite our [technical report](https://shengdinghu.notion.site/MiniCPM-Unveiling-the-Potential-of-End-side-Large-Language-Models-d4d3a8c426424654a4e80e42a711cb20?pvs=4) if you find our work valuable.
+
+```
+@inproceedings{minicpm2024,
+  title={MiniCPM: Unveiling the Potential of End-side Large Language Models},
+ booktitle={OpenBMB Blog},
+ year={2024}
+}
+```
diff --git a/Model_Architecture_Discussions/MiniCPM/config.json b/Model_Architecture_Discussions/MiniCPM/config.json
new file mode 100644
index 0000000..610f7ef
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/config.json
@@ -0,0 +1,32 @@
+{
+ "_name_or_path": "openbmb/CPM-2B",
+ "architectures": [
+ "MiniCPMForCausalLM"
+ ],
+ "auto_map": {
+ "AutoConfig": "configuration_minicpm.MiniCPMConfig",
+ "AutoModel": "modeling_minicpm.MiniCPMModel",
+ "AutoModelForCausalLM": "modeling_minicpm.MiniCPMForCausalLM",
+ "AutoModelForSeq2SeqLM": "modeling_minicpm.MiniCPMForCausalLM",
+ "AutoModelForSequenceClassification": "modeling_minicpm.MiniCPMForSequenceClassification"
+ },
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 2304,
+ "initializer_range": 0.1,
+ "intermediate_size": 5760,
+ "max_position_embeddings": 2048,
+ "num_attention_heads": 36,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 36,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.36.0",
+ "use_cache": true,
+ "vocab_size": 122753,
+ "scale_emb": 12,
+ "dim_model_base": 256,
+ "scale_depth": 1.4
+}
\ No newline at end of file
diff --git a/Model_Architecture_Discussions/MiniCPM/configuration_minicpm.py b/Model_Architecture_Discussions/MiniCPM/configuration_minicpm.py
new file mode 100644
index 0000000..ae7cbb9
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/configuration_minicpm.py
@@ -0,0 +1,202 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MiniCPM model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+MINICPM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class MiniCPMConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`MiniCPMModel`]. It is used to instantiate a MiniCPM
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the MiniCPM-7B.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the MiniCPM model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`MiniCPMModel`]
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 11008):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details checkout [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+ `num_attention_heads`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ pad_token_id (`int`, *optional*):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ pretraining_tp (`int`, *optional*, defaults to 1):
+ Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+ document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+ necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+ issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
+            Whether to tie the input and output word embeddings.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
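+        scale_emb (`float`, *optional*, defaults to 1):
+            Multiplier applied to the token-embedding outputs, part of MiniCPM's muP-style stable scaling.
+        dim_model_base (`int`, *optional*, defaults to 1):
+            Base width used to scale the output logits: hidden states are divided by `hidden_size / dim_model_base`
+            before the LM head.
+        scale_depth (`float`, *optional*, defaults to 1):
+            Residual scaling: each sub-layer output is multiplied by `scale_depth / sqrt(num_hidden_layers)` before
+            being added back to the residual stream.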
+
+ ```python
+ >>> from transformers import MiniCPMModel, MiniCPMConfig
+
+    >>> # Initializing a MiniCPM style configuration
+    >>> configuration = MiniCPMConfig()
+
+    >>> # Initializing a model from that configuration
+    >>> model = MiniCPMModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "minicpm"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ hidden_act="silu",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=None,
+ bos_token_id=1,
+ eos_token_id=2,
+ pretraining_tp=1,
+ tie_word_embeddings=True,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ attention_bias=False,
+ attention_dropout=0.0,
+ scale_emb=1,
+ dim_model_base=1,
+ scale_depth=1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.pretraining_tp = pretraining_tp
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self._rope_scaling_validation()
+ self.attention_bias = attention_bias
+ self.attention_dropout = attention_dropout
+ self.scale_emb = scale_emb
+ self.dim_model_base = dim_model_base
+ self.scale_depth = scale_depth
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+        try:
+            # Automatically prefer FlashAttention-2 when the `flash_attn` package is available.
+            import flash_attn  # noqa: F401
+            self._attn_implementation = "flash_attention_2"
+        except ImportError:
+            pass
+
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
+ f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
diff --git a/Model_Architecture_Discussions/MiniCPM/generation_config.json b/Model_Architecture_Discussions/MiniCPM/generation_config.json
new file mode 100644
index 0000000..4881cde
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "do_sample": true,
+ "top_p": 0.8,
+ "temperature": 0.8,
+ "bos_token_id": 1,
+ "eos_token_id": 2
+}
\ No newline at end of file
diff --git a/Model_Architecture_Discussions/MiniCPM/gitattributes b/Model_Architecture_Discussions/MiniCPM/gitattributes
new file mode 100644
index 0000000..0c72ca2
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*.tfevents* filter=lfs diff=lfs merge=lfs -text
+*.db* filter=lfs diff=lfs merge=lfs -text
+*.ark* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
diff --git a/Model_Architecture_Discussions/MiniCPM/special_tokens_map.json b/Model_Architecture_Discussions/MiniCPM/special_tokens_map.json
new file mode 100644
index 0000000..451134b
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/Model_Architecture_Discussions/MiniCPM/tokenizer.json b/Model_Architecture_Discussions/MiniCPM/tokenizer.json
new file mode 100644
index 0000000..209e947
--- /dev/null
+++ b/Model_Architecture_Discussions/MiniCPM/tokenizer.json
@@ -0,0 +1,294435 @@
+{
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Prepend",
+ "prepend": "▁"
+ },
+ {
+ "type": "Replace",
+ "pattern": {
+ "String": " "
+ },
+ "content": "▁"
+ }
+ ]
+ },
+ "pre_tokenizer": null,
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 1
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "": {
+ "id": "",
+ "ids": [
+ 1
+ ],
+ "tokens": [
+ ""
+ ]
+ }
+ }
+ },
+ "decoder": {
+ "type": "Sequence",
+ "decoders": [
+ {
+ "type": "Replace",
+ "pattern": {
+ "String": "▁"
+ },
+ "content": " "
+ },
+ {
+ "type": "ByteFallback"
+ },
+ {
+ "type": "Fuse"
+ },
+ {
+ "type": "Strip",
+ "content": " ",
+ "start": 1,
+ "stop": 0
+ }
+ ]
+ },
+ "model": {
+ "type": "BPE",
+ "dropout": null,
+ "unk_token": "",
+ "continuing_subword_prefix": null,
+ "end_of_word_suffix": null,
+ "fuse_unk": true,
+ "byte_fallback": true,
+ "vocab": {
+ "": 0,
+ "": 1,
+ "": 2,
+ "": 3,
+ "": 4,
+ "\n": 5,
+ "\t": 6,
+ "
": 7,
+ "
": 8,
+ "": 9,
+ "": 10,
+ "": 11,
+ "
": 12,
+ "": 13,
+ " | | ": 14,
+ "": 15,
+ "": 16,
+ "": 17,
+ "": 18,
+ "": 21,
+ "": 22,
+ "
": 23,
+ "": 24,
+ "": 25,
+ "": 26,
+ "": 27,
+ "": 28,
+ "": 29,
+ "": 30,
+ "": 31,
+ "": 32,
+ "
": 33,
+ "
": 34,
+ "
": 35,
+ "": 36,
+ "": 37,
+ "": 38,
+ "
": 39,
+ "": 40,
+ "": 41,
+ "
": 42,
+ "": 43,
+ "
": 44,
+ "": 45,
+ "": 46,
+ "": 47,
+ "
": 48,
+ "": 49,
+ "": 50,
+ "": 51,
+ "0": 52,
+ "1": 53,
+ "2": 54,
+ "3": 55,
+ "4": 56,
+ "5": 57,
+ "6": 58,
+ "7": 59,
+ "8": 60,
+ "9": 61,
+ "+": 62,
+ "-": 63,
+ "=": 64,
+ ",": 65,
+ "。": 66,
+ "!": 67,
+ "?": 68,
+ "、": 69,
+ ":": 70,
+ "¥": 71,
+ ".": 72,
+ "!": 73,
+ "?": 74,
+ "...": 75,
+ "。。。": 76,
+ "。。。。。。": 77,
+ "《": 78,
+ "》": 79,
+ "【": 80,
+ "】": 81,
+ "『": 82,
+ "』": 83,
+ "```": 84,
+ "": 86,
+ "---": 87,
+ "": 88,
+ ";": 89,
+ ".": 90,
+ "=": 91,
+ "<": 92,
+ ">": 93,
+ "-": 94,
+ "+": 95,
+ "%": 96,
+ "‼": 97,
+ "㊣": 98,
+ "/": 99,
+ "|": 100,
+ "": 101,
+ "": 102,
+ "": 103,
+ "": 104,
+ "": 105,
+ "": 106,
+ "": 107,
+ "": 108,
+ "": 109,
+ "": 110,
+ "": 111,
+ "": 112,
+ "": 113,
+ "": 114,
+ "": 115,
+ "": 116,
+ "": 117,
+ "": 118,
+ "": 119,
+ "": 120,
+ "": 121,
+ "": 122,
+ "": 123,
+ "": 124,
+ "": 125,
+ "": 126,
+ "": 127,
+ "": 128,
+ "": 129,
+ "": 130,
+ "": 131,
+ "": 132,
+ "": 133,
+ "": 134,
+ "": 135,
+ "": 136,
+ "": 137,
+ "": 138,
+ "": 139,
+ "": 140,
+ "": 141,
+ "": 142,
+ "": 143,
+ "": 144,
+ "": 145,
+ "": 146,
+ "": 147,
+ "": 148,
+ "": 149,
+ "": 150,
+ "": 151,
+ "": 152,
+ "": 153,
+ "": 154,
+ "": 155,
+ "": 156,
+ "": 157,
+ "": 158,
+ "": 159,
+ "": 160,
+ "": 161,
+ "": 162,
+ "": 163,
+ "": 164,
+ "": 165,
+ "": 166,
+ "": 167,
+ "": 168,
+ "": 169,
+ "": 170,
+ "": 171,
+ "": 172,
+ "": 173,
+ "": 174,
+ "": 175,
+ "": 176,
+ "": 177,
+ "": 178,
+ "": 179,
+ "": 180,
+ "": 181,
+ "": 182,
+ "": 183,
+ "": 184,
+ "": 185,
+ "": 186,
+ "": 187,
+ "": 188,
+ "": 189,
+ "": 190,
+ "": 191,
+ "": 192,
+ "": 193,
+ "": 194,
+ "": 195,
+ "": 196,
+ "": 197,
+ "": 198,
+ "": 199,
+ "": 200,
+ "": 201,
+ "": 202,
+ "": 203,
+ "": 204,
+ "": 205,
+ "": 206,
+ "": 207,
+ "": 208,
+ "": 209,
+ "": 210,
+ "": 211,
+ "": 212,
+ "": 213,
+ "": 214,
+ "": 215,
+ "": 216,
+ "": 217,
+ "": 218,
+ "": 219,
+ "": 220,
+ "": 221,
+ "": 222,
+ "": 223,
+ "": 224,
+ "": 225,
+ "": 226,
+ "": 227,
+ "": 228,
+ "": 229,
+ "": 230,
+ "": 231,
+ "": 232,
+ "": 233,
+ "": 234,
+ "": 235,
+ "": 236,
+ "": 237,
+ "": 238,
+ "": 239,
+ "": 240,
+ "": 241,
+ "": 242,
+ "": 243,
+ "": 244,
+ "": 245,
+ "": 246,
+ "": 247,
+ "": 248,
+ "": 249,
+ "": 250,
+ "": 251,
+ "": 252,
+ "": 253,
+ "": 254,
+ "": 255,
+ "": 256,
+ "": 257,
+ "": 258,
+ "": 259,
+ "": 260,
+ "": 261,
+ "": 262,
+ "": 263,
+ "": 264,
+ "": 265,
+ "": 266,
+ "": 267,
+ "": 268,
+ "": 269,
+ "": 270,
+ "": 271,
+ "": 272,
+ "": 273,
+ "": 274,
+ "": 275,
+ "": 276,
+ "": 277,
+ "": 278,
+ "": 279,
+ "": 280,
+ "": 281,
+ "": 282,
+ "": 283,
+ "": 284,
+ "": 285,
+ "": 286,
+ "": 287,
+ "": 288,
+ "": 289,
+ "": 290,
+ "": 291,
+ "": 292,
+ "": 293,
+ "": 294,
+ "": 295,
+ "": 296,
+ "": 297,
+ "": 298,
+ "": 299,
+ "": 300,
+ "": 301,
+ "": 302,
+ "": 303,
+ "": 304,
+ "": 305,
+ "": 306,
+ "": 307,
+ "": 308,
+ "": 309,
+ "": 310,
+ "": 311,
+ "": 312,
+ "": 313,
+ "": 314,
+ "": 315,
+ "": 316,
+ "": 317,
+ "": 318,
+ "": 319,
+ "": 320,
+ "": 321,
+ "": 322,
+ "": 323,
+ "": 324,
+ "": 325,
+ "": 326,
+ "": 327,
+ "": 328,
+ "": 329,
+ "": 330,
+ "": 331,
+ "": 332,
+ "": 333,
+ "": 334,
+ "": 335,
+ "": 336,
+ "": 337,
+ "": 338,
+ "": 339,
+ "": 340,
+ "": 341,
+ "": 342,
+ "": 343,
+ "": 344,
+ "": 345,
+ "": 346,
+ "": 347,
+ "": 348,
+ "": 349,
+ "": 350,
+ "": 351,
+ "": 352,
+ "": 353,
+ "