Merge pull request #36 from Ethan-Chen-plus/main

add more models
Ethan-Chen-plus 2024-06-01 18:26:54 +08:00 committed by GitHub
commit ee25faa084
14 changed files with 6993 additions and 0 deletions


@@ -0,0 +1,313 @@
"""Implements HF OpenELMConfig based on PretrainedConfig"""
from numbers import Number
from typing import List, Optional, Union
import numpy as np
from transformers import PretrainedConfig
def make_divisible(
v: Union[float, int],
divisor: Optional[int] = 8,
min_value: Optional[Union[float, int]] = None,
) -> Union[float, int]:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by the divisor.
It can be seen at:
https://github.com/tensorflow/models/blob/2cfc99eff5e5eb729c6793d2f3d03aa1c9be2b15/research/slim/nets/mobilenet/mobilenet.py#L62
Args:
v: input value
divisor: the divisor; defaults to 8
min_value: minimum allowed value; defaults to the divisor when not given
Returns:
new_v: new divisible value
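Example:
>>> make_divisible(100, divisor=8)
104
>>> make_divisible(10, divisor=8)
16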
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def compute_heads(model_dim: int, head_dim: int) -> int:
"""Compute the number of heads.
Args:
model_dim: Model dimension.
head_dim: Head dimension.
Returns:
An integer denoting the number of heads in multi-head attention.
Raises:
ValueError: if model dimension is not divisible by head dimension.
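Example:
>>> compute_heads(model_dim=1280, head_dim=64)
20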
"""
if model_dim % head_dim == 0:
return model_dim // head_dim
else:
raise ValueError(
f"Model dimension should be divisible by head dimension. Got: {model_dim} and {head_dim}."
)
OpenELM_CONFIGS = {
"OpenELM-270M": dict(
num_transformer_layers=16,
model_dim=1280,
head_dim=64,
num_gqa_groups=4,
normalize_qk_projections=True,
share_input_output_layers=True,
# Vary the FFN and QKV multipliers to create variable FFN and attention layers respectively.
ffn_multipliers=(0.5, 4.0),
qkv_multipliers=(0.5, 1.0),
),
"OpenELM-450M": dict(
num_transformer_layers=20,
model_dim=1536,
head_dim=64,
num_gqa_groups=4,
normalize_qk_projections=True,
share_input_output_layers=True,
# Vary the FFN and QKV multipliers to create variable FFN and attention layers respectively.
ffn_multipliers=(0.5, 4.0),
qkv_multipliers=(0.5, 1.0),
),
"OpenELM-1_1B": dict(
num_transformer_layers=28,
model_dim=2048,
head_dim=64,
num_gqa_groups=4,
normalize_qk_projections=True,
share_input_output_layers=True,
# Vary the FFN and QKV multipliers to create variable FFN and attention layers respectively.
ffn_multipliers=(0.5, 4.0),
qkv_multipliers=(0.5, 1.0),
),
"OpenELM-3B": dict(
num_transformer_layers=36,
model_dim=3072,
head_dim=128,
num_gqa_groups=4,
normalize_qk_projections=True,
share_input_output_layers=True,
# Vary the FFN and QKV multipliers to create variable FFN and attention layers respectively.
ffn_multipliers=(0.5, 4.0),
qkv_multipliers=(0.5, 1.0),
),
}
class OpenELMConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`OpenELMModel`]. It is used to instantiate an OpenELM model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the OpenELM model.
max_context_length (`int`, *optional*, defaults to 2048):
Maximum number of input tokens.
num_transformer_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer decoder.
model_dim (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
qkv_multipliers (`Union[Number, List[Number]]`, *optional*, defaults to 1.0):
If `qkv_multipliers` is a Number, then all attention layers have the same latent dimensions,
resulting in uniform allocation of parameters.
If `qkv_multipliers` is a List of Number, then each attention layer has different latent dimensions,
assuming qkv_multipliers[0] != qkv_multipliers[1]. This results in variable allocation of parameters in the attention layers.
This scaling is known as layer-wise or block-wise scaling: https://arxiv.org/abs/2008.00623
num_query_heads (`Union[int, None]`, *optional*, defaults to None):
The number of query heads, computed from `compute_heads(model_dim=model_dim, head_dim=head_dim)`.
num_gqa_groups (`int`, *optional*, defaults to 1):
This variable allows switching between multi-head attention, group query attention, and multi-query attention.
When num_gqa_groups == 1, it is multi-head attention.
When 1 < num_gqa_groups < num_heads and num_heads is divisible by num_gqa_groups, it is group query attention.
When num_gqa_groups == num_heads, it is multi-query attention.
ffn_multipliers (`Union[Number, List[Number]]`, *optional*, defaults to 4.0):
Feed-forward network (FFN) multipliers.
If `ffn_multipliers` is a Number, then all FFN layers have the same latent dimensions,
resulting in uniform allocation of parameters.
If `ffn_multipliers` is a List of Number, then each FFN layer has different latent dimensions,
assuming ffn_multipliers[0] != ffn_multipliers[1]. This results in variable allocation of parameters in the FFN layers.
This scaling is known as layer-wise or block-wise scaling: https://arxiv.org/abs/2008.00623
ffn_with_glu (`bool`, *optional*, defaults to True):
Whether to use FFN with Gated Linear Unit (GLU)
ffn_dim_divisor (`int`, *optional*, defaults to 256):
The ffn layer dimension divisor.
activation_fn_name (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the decoder.
normalization_layer_name (`str` or `function`, *optional*, defaults to `"rms_norm"`):
Type of normalization layer.
normalize_qk_projections (`bool`, *optional*, defaults to False):
Whether to normalize queries and keys after projections
share_input_output_layers (`bool`, *optional*, defaults to False):
Whether to share the embedding between input and output linear layer
rope_freq_constant (`int`, *optional*, defaults to 10000):
The base period of the RoPE embeddings.
rope_max_length (`int`, *optional*, defaults to 4096):
The rope_max_length is set to twice the max_context_length.
This allows flexibility in token lengths during training or fine-tuning.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
"""
model_type = "openelm"
def __init__(
self,
vocab_size: int = 32000,
max_context_length: int = 2048,
num_transformer_layers: int = 12,
model_dim: int = 2048,
head_dim: int = 128,
qkv_multipliers: Union[Number, List[Number]] = 1.0,
num_query_heads: Union[int, None] = None,
num_gqa_groups: int = 1,
ffn_multipliers: Union[Number, List[Number]] = 4.0,
ffn_with_glu: bool = True,
ffn_dim_divisor: int = 256,
activation_fn_name: str = "swish",
normalization_layer_name: str = "rms_norm",
normalize_qk_projections: bool = False,
share_input_output_layers: bool = False,
rope_freq_constant: int = 10000,
rope_max_length: int = 4096,
initializer_range: float = 0.02,
use_cache: bool = True,
bos_token_id: int = 1,
eos_token_id: int = 2,
**kwargs,
) -> None:
self.vocab_size = vocab_size
self.max_context_length = max_context_length
self.num_transformer_layers = num_transformer_layers
self.model_dim = model_dim
self.head_dim = head_dim
self.qkv_multipliers = qkv_multipliers
self.num_query_heads = num_query_heads
self.num_gqa_groups = num_gqa_groups
self.ffn_multipliers = ffn_multipliers
self.ffn_with_glu = ffn_with_glu
self.ffn_dim_divisor = ffn_dim_divisor
self.activation_fn_name = activation_fn_name
self.normalization_layer_name = normalization_layer_name
self.normalize_qk_projections = normalize_qk_projections
self.share_input_output_layers = share_input_output_layers
self.rope_freq_constant = rope_freq_constant
self.rope_max_length = rope_max_length
self.num_query_heads = (
compute_heads(model_dim=model_dim, head_dim=head_dim)
if num_query_heads is None
else num_query_heads
)
self.initializer_range = initializer_range
self.__post_init__()
super().__init__(
use_cache=use_cache,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
def __post_init__(self) -> None:
if self.num_gqa_groups is not None:
head_multiple_of = self.num_gqa_groups
else:
head_multiple_of = 2
if isinstance(self.qkv_multipliers, Number):
# All attention layers have the same latent dimensions, resulting in uniform allocation of parameters.
qkv_dim = make_divisible(
self.model_dim * self.qkv_multipliers,
divisor=self.head_dim * head_multiple_of,
)
query_dims = [int(qkv_dim)] * self.num_transformer_layers
elif (
isinstance(self.qkv_multipliers, (tuple, list))
and len(self.qkv_multipliers) == 2
):
# Each attention layer has different latent dimensions, assuming qkv_multipliers[0] != qkv_multipliers[1].
# This results in variable allocation of parameters in the attention layers.
# This scaling is known as layer-wise or block-wise scaling: https://arxiv.org/abs/2008.00623
qkv_multipliers = [
round(v, 2)
for v in np.linspace(
self.qkv_multipliers[0],
self.qkv_multipliers[1],
num=self.num_transformer_layers,
dtype=float,
)
]
# Make sure that scaled model dimension is divisible by scaled head dimension.
query_dims = [
int(
make_divisible(
self.model_dim * m, divisor=self.head_dim * head_multiple_of
)
)
for m in qkv_multipliers
]
else:
raise NotImplementedError(
f"QKV multipliers should be a single number or a list containing exactly two numbers. Got: {qkv_multipliers}."
)
# compute the number of query, key, and value heads
# For multi-head and multi-query attention, the number of heads for query, key, and value are the same.
# For group query attention, the number of key and value heads are the same.
self.num_query_heads = [
int(compute_heads(q_dim, self.head_dim)) for q_dim in query_dims
]
self.num_kv_heads = [
q_heads // self.num_gqa_groups for q_heads in self.num_query_heads
]
# Feed-forward network (FFN) multipliers
if isinstance(self.ffn_multipliers, Number):
# All FFN layers have the same latent dimensions, resulting in uniform allocation of parameters.
self.ffn_multipliers = [self.ffn_multipliers] * self.num_transformer_layers
elif isinstance(self.ffn_multipliers, (tuple, list)):
# Each FFN layer has different latent dimensions, assuming ffn_multipliers[0] != ffn_multipliers[1].
# This results in variable allocation of parameters in the FFN layers.
# This scaling is known as layer-wise or block-wise scaling: https://arxiv.org/abs/2008.00623
if len(self.ffn_multipliers) == 2:
self.ffn_multipliers = [
round(v, 2)
for v in np.linspace(
self.ffn_multipliers[0],
self.ffn_multipliers[1],
num=self.num_transformer_layers,
dtype=float,
)
]
else:
assert (
len(self.ffn_multipliers) == self.num_transformer_layers
), f"{len(self.ffn_multipliers)=}!={self.num_transformer_layers=}"
else:
raise NotImplementedError(
f"FFN multipliers should be a single number or a list containing exactly two numbers. Got: {qkv_multipliers}."
)
# check num_query_heads divisible by num_kv_heads for every layer
for layer_idx in range(len(query_dims)):
assert self.num_query_heads[layer_idx] % self.num_kv_heads[layer_idx] == 0
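# Minimal usage sketch: build the 270M preset above and inspect the per-layer
# head counts that __post_init__ derives from the (0.5, 1.0) qkv_multipliers.
if __name__ == "__main__":
    config = OpenELMConfig(**OpenELM_CONFIGS["OpenELM-270M"])
    print(config.num_query_heads)  # grows from 12 to 20 across the 16 layers
    print(config.num_kv_heads)  # query heads divided by num_gqa_groups (4)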

File diff suppressed because it is too large.


@@ -0,0 +1,295 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "dd05f32c-a90f-4122-b6d7-a5ec7b3b9ba0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"env: HF_ENDPOINT=https://hf-mirror.com\n"
]
}
],
"source": [
"%env HF_ENDPOINT=https://hf-mirror.com"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "54f03217-da8d-4a05-9c85-9e0301a597e7",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# 设置 HF_HOME 环境变量 设置下载路径\n",
"os.environ['HF_HOME'] = '/data1/ckw'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "94cab483-b247-4aa8-9557-d15e459244af",
"metadata": {},
"outputs": [],
"source": [
"# 这个时候由于OpenELM还没有官方发布在transformer所以需要改下源码(已经有了更好的办法,因此不需要改源码了)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e2f3081d-f795-4f86-b80e-e915ae56b426",
"metadata": {},
"outputs": [],
"source": [
"# /data1/ckw/micromamba/envs/kewei-ai/lib/python3.11/site-packages/transformers/models/auto/tokenization_auto.py:909"
]
},
{
"cell_type": "markdown",
"id": "db03e7fd-d06f-4e78-842f-66c8e02043bd",
"metadata": {},
"source": [
"#### 1.3 AutoModelForCausalLM代码\n",
"\n",
"```python\n",
"class AutoModelForCausalLM:\n",
" def __init__(self):\n",
" raise EnvironmentError(\n",
" \"AutoModelForCausalLM is designed to be instantiated \"\n",
" \"using the `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` or \"\n",
" \"`AutoModelForCausalLM.from_config(config)` methods.\"\n",
" )\n",
"\n",
"\t@classmethod\n",
" @replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING, use_model_types=False)\n",
" def from_config(cls, config):\n",
"\n",
" if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys():\n",
" return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)](config)\n",
" raise ValueError(\n",
" \"Unrecognized configuration class {} for this kind of AutoModel: {}.\\n\"\n",
" \"Model type should be one of {}.\".format(\n",
" config.__class__, cls.__name__, \", \".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())\n",
" )\n",
" )\n",
"\n",
"\n",
"\t@classmethod\n",
" @replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING)\n",
" @add_start_docstrings(\n",
" \"Instantiate one of the model classes of the library---with a causal language modeling head---from a \"\n",
" \"pretrained model.\",\n",
" AUTO_MODEL_PRETRAINED_DOCSTRING,\n",
" )\n",
" def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n",
" config = kwargs.pop(\"config\", None)\n",
" if not isinstance(config, PretrainedConfig):\n",
" config, kwargs = AutoConfig.from_pretrained(\n",
" pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs\n",
" )\n",
"\n",
" if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys():\n",
" return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)].from_pretrained(\n",
" pretrained_model_name_or_path, *model_args, config=config, **kwargs\n",
" )\n",
" raise ValueError(\n",
" \"Unrecognized configuration class {} for this kind of AutoModel: {}.\\n\"\n",
" \"Model type should be one of {}.\".format(\n",
" config.__class__, cls.__name__, \", \".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())\n",
" )\n",
" )\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "744c6db7-53f9-4911-adcb-4f0618693071",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "7dd376f050c3496b904a5a545f499e07",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer_config.json: 0%| | 0.00/265 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "4936fbb98c5446ebb60f4bdb288ddc73",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer.model: 0%| | 0.00/500k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "080e814bd03542aeb4a9f882c67ed06a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer.json: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d04a2f9f4a57490bb70e88af4ab10008",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"added_tokens.json: 0%| | 0.00/21.0 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6a728b39e23043459b8c2bddef6e8845",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"special_tokens_map.json: 0%| | 0.00/435 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.\n"
]
},
{
"data": {
"text/plain": [
"'\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence. The organization aims to provide AI-related courses to students in China.\\n\\nThis repository contains the code for the following courses:\\n\\n1. [Introduction to AI: Neural Networks and Classification](https://www.datawhalechina.com/courses/introduction-to-ai-neural-networks-and-classification/)\\n2. [Introduction to AI: Deep Learning and Applications](https://www.datawhalechina.com/courses/introduction-to-ai-deep-learning-and-applications/)\\n3. [Introduction to AI: Algorithms and Applications](https://www.datawhalechina.com/courses/introduction-to-ai-algorithms-and-applications/)\\n4. [Introduction to AI: Data Preparation and Model Evaluation](https://www.datawhalechina.com/courses/introduction-to-ai-data-preparation-and-model-evaluation/)\\n5. [Introduction to AI: Building and Evaluating AI Models](https://www.datawhalechina.com/courses/introduction-to-ai-building-and-evaluating-ai'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from transformers import AutoTokenizer\n",
"from modeling_openelm import OpenELMForCausalLM\n",
"\n",
"model = OpenELMForCausalLM.from_pretrained(\"Apple/OpenELM-270M-Instruct\")#trust_remote_code=True\n",
"# tokenizer = AutoTokenizer.from_pretrained(\"Apple/OpenELM-270M-Instruct\")Llama-2-7b-hf\n",
"tokenizer = AutoTokenizer.from_pretrained(\"NousResearch/Llama-2-7b-chat-hf\")\n",
"prompt = '\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.'\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=300)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
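{
"cell_type": "markdown",
"id": "openelm-trust-remote-code-note",
"metadata": {},
"source": [
"A hedged aside on the \"better way\" mentioned above: instead of importing `modeling_openelm` directly, recent `transformers` versions can load the remote code shipped with the checkpoint via `AutoModelForCausalLM.from_pretrained(\"Apple/OpenELM-270M-Instruct\", trust_remote_code=True)`, so no source patching is required."
]
},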
{
"cell_type": "raw",
"id": "6c0f8954-aca3-496b-86e4-843cdb00b104",
"metadata": {},
"source": [
"上面这个openelm的回复感觉还比较贴合datawhale的实际情况哈速度也是很快的没得说不过链接是编的哈哈"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "060b86f9-fda5-4d9f-8292-4d9464c7b2ef",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.\n"
]
},
{
"data": {
"text/plain": [
"\"\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners \\nimprove their Chinese language skills through data-driven learning.\\n\\n## Data\\n\\nThe DataWhalechina platform collects data from various sources, including:\\n\\n1. [China's National Database of Vocabulary and Phrase Structure](https://www.national-database.gov.cn/): This database contains vocabulary and phrase structure definitions for 1,000,000+ Chinese words and phrases.\\n\\n2. [China's National Academic Database of Literature and Culture](https://academic.lib.shu.edu.cn/): This database contains articles, books, and speeches written in Chinese by Chinese scholars.\\n\\n3. [China's National Knowledge Incorporation Database](https://knowledge.cn/): This database contains data on intellectual property rights, patents, and copyrights.\\n\\n4. [China's National Bureau of Statistics](https://www.stat.gov.cn/): This database contains statistics on population, living standards, and purchasing power.\\n\\n5. [China's National Bureau of Census](https://www.census.gov.cn/): This database contains\""
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = '\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners '\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=300)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "raw",
"id": "052ab03d-f739-40e5-9f48-e8ab3d0f5f19",
"metadata": {},
"source": [
"如果提示内容给的比较短,可能会在事实上面出一点小问题"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "kewei-ai",
"language": "python",
"name": "kewei-ai"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,56 @@
from transformers.configuration_utils import PretrainedConfig
class GPTPanguConfig(PretrainedConfig):
model_type = "gpt_pangu"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=40000,
max_position_embeddings=1024,
hidden_size=1024,
intermediate_size=None,
num_layers=24,
num_heads=16,
activation_function="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
scale_attn_weights=True,
initializer_range=0.02,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
use_cache=True,
# bos_token_id=9,
# eos_token_id=9,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_layers = num_layers
self.num_heads = num_heads
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.scale_attn_weights = scale_attn_weights
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
self.use_cache = use_cache
# self.bos_token_id = bos_token_id
# self.eos_token_id = eos_token_id
super().__init__(**kwargs)
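# Minimal usage sketch:
if __name__ == "__main__":
    config = GPTPanguConfig(num_layers=2, num_heads=4, hidden_size=256)
    # intermediate_size defaults to None; GPTPanguBlock then falls back to 4 * hidden_size.
    print(config.num_layers, config.hidden_size, config.intermediate_size)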


@@ -0,0 +1,549 @@
"""PyTorch PanguAlpha GPT2 Model"""
from configuration_gptpangu import GPTPanguConfig
from typing import Tuple
import math
import torch
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_utils import PreTrainedModel
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.utils import logging
logger = logging.get_logger(__name__)
class GPTPanguAttention(nn.Module):
def __init__(self, config):
super().__init__()
max_positions = config.max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.embed_dim = config.hidden_size
self.num_heads = config.num_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
)
self.scale_attn_weights = config.scale_attn_weights
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
self.c_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if self.scale_attn_weights:
attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(*new_shape)
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
tensor = tensor.permute(0, 2, 1, 3).contiguous()
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
custom_query=None,
use_cache=False,
output_attentions=False,
):
query = self.q_proj(custom_query) if custom_query is not None else self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query, self.num_heads, self.head_dim)
key = self._split_heads(key, self.num_heads, self.head_dim)
value = self._split_heads(value, self.num_heads, self.head_dim)
if layer_past is not None:
past_key, past_value = layer_past
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key, value)
else:
present = None
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs # a, present, (attentions)
class GPTPanguMLP(nn.Module):
def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * hidden_size
super().__init__()
embed_dim = config.hidden_size
self.c_fc = nn.Linear(embed_dim, intermediate_size)
self.c_proj = nn.Linear(intermediate_size, embed_dim)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states):
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class GPTPanguBlock(nn.Module):
def __init__(self, config):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = GPTPanguAttention(config)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = GPTPanguMLP(inner_dim, config)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
custom_query=None,
use_cache=False,
output_attentions=False,
):
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
custom_query=custom_query,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + residual
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class GPTPanguPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPTPanguConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear,)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if "c_proj" in name and "weight" in name:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_layers)))
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, GPTPanguModel):
module.gradient_checkpointing = value
class GPTPanguModel(GPTPanguPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.wqe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([GPTPanguBlock(config) for _ in range(config.num_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# GPT2Attention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x num_heads x N x N
# head_mask has shape n_layer x batch x num_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
# top attention custom query
last_layer_id = len(self.h) - 1
query_embeds = self.wqe(position_ids)
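# PanGu-alpha's last block is a "top query" layer: instead of deriving the
# query from the hidden states, it uses this dedicated learned position
# embedding (wqe) as the query, which is why custom_query is passed only
# when i == last_layer_id in the loop below.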
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
# Final LayerNorm before last query layer
if i == last_layer_id:
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states=hidden_states,
layer_past=None,
attention_mask=attention_mask,
head_mask=head_mask[i],
# custom query
custom_query=query_embeds if i == last_layer_id else None,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
# custom query
custom_query=query_embeds if i == last_layer_id else None,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class GPTPanguForCausalLM(GPTPanguPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = GPTPanguModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.int().cumsum(-1).long() - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
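# Hedged smoke test (illustrative only): forward a tiny randomly initialized
# model and check the logits shape.
if __name__ == "__main__":
    tiny_config = GPTPanguConfig(
        vocab_size=100, max_position_embeddings=64, hidden_size=64, num_layers=2, num_heads=4
    )
    model = GPTPanguForCausalLM(tiny_config)
    out = model(input_ids=torch.randint(0, 100, (1, 8)))
    print(out.logits.shape)  # expected: torch.Size([1, 8, 100])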


@@ -0,0 +1,350 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "0364fa99-3cad-4c11-ac41-6523fb98d187",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"env: HF_ENDPOINT=https://hf-mirror.com\n"
]
}
],
"source": [
"%env HF_ENDPOINT=https://hf-mirror.com"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "c654b825-84fd-43df-8412-53b1f9ecb8c7",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# 设置 HF_HOME 环境变量 设置下载路径\n",
"os.environ['HF_HOME'] = '/data1/ckw'"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f30fc135-f12f-43bd-96e3-7ab02ef91296",
"metadata": {},
"outputs": [],
"source": [
"# %pip install jieba -q"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e9e91c93-9b06-4cff-b826-02d1f4fecc5b",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Building prefix dict from the default dictionary ...\n",
"Loading model from cache /tmp/jieba.cache\n",
"Loading model cost 0.932 seconds.\n",
"Prefix dict has been built successfully.\n"
]
}
],
"source": [
"from tokenization_gptpangu import GPTPanguTokenizer"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "94abdb98-fb74-42c0-805b-03df9fd12311",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/data1/ckw/micromamba/envs/kewei-ai/lib/python3.11/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()\n",
" return self.fget.__get__(instance, owner)()\n"
]
},
{
"data": {
"text/plain": [
"'\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# from transformers import AutoTokenizer\n",
"from modeling_gptpangu import GPTPanguForCausalLM\n",
"\n",
"model = GPTPanguForCausalLM.from_pretrained(\"sunzeyeah/pangu-350M-sft\")#trust_remote_code=True\n",
"# tokenizer = AutoTokenizer.from_pretrained(\"Apple/OpenELM-270M-Instruct\")Llama-2-7b-hf\n",
"tokenizer = GPTPanguTokenizer.from_pretrained(\"sunzeyeah/pangu-350M-sft\")\n",
"prompt = '\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.'\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=300)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "09bf8f6e-8c64-4c32-b289-71aa897a9b3f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'中国和美国和日本和法国和加拿大和澳大利亚的首都分别是哪里?'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = \"中国和美国和日本和法国和加拿大和澳大利亚的首都分别是哪里?\"\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=300)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "d9eff78c-7abf-4b05-9335-286f789fbaf0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[ 1, 96, 22, 337, 22, 691, 22, 3204, 22, 4672, 22, 6605,\n",
" 11, 6539, 1249, 16, 1329, 28, 9]])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs.input_ids"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "a554f163-4226-476e-b8e1-5efe45b7988c",
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[ 1, 96, 22, 337, 22, 691, 22, 3204, 22, 4672, 22, 6605,\n",
" 11, 6539, 1249, 16, 1329, 28, 9, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n",
" 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]])"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_ids"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "8846ecb1-e912-49f2-8f80-acb6d3e5304b",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/data1/ckw/micromamba/envs/kewei-ai/lib/python3.11/site-packages/transformers/generation/configuration_utils.py:515: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.8` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.\n",
" warnings.warn(\n",
"/data1/ckw/micromamba/envs/kewei-ai/lib/python3.11/site-packages/transformers/generation/configuration_utils.py:520: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.8` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"['我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,我也是这样,']\n"
]
}
],
"source": [
"prompt = \"我不能确定对方是不是喜欢我,我却想分分秒秒跟他在一起,有谁能告诉我如何能想他少一点<sep>回答:\"\n",
"inputs = tokenizer(prompt, add_special_tokens=False, return_token_type_ids=False, return_tensors=\"pt\")\n",
"outputs = model.generate(**inputs,\n",
" max_new_tokens=100,\n",
" pad_token_id=tokenizer.pad_token_id,\n",
" do_sample=False,\n",
" num_return_sequences=1,\n",
" top_p=0.8,\n",
" temperature=0.8)\n",
"results = tokenizer.batch_decode(outputs, skip_special_tokens=True)\n",
"results = [result.split(\"答:\", maxsplit=1)[1] for result in results]\n",
"print(results)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "065dc7a0-2efa-4d14-9130-e99720f4f98c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['美国和日本和法国和加拿大和澳大利亚的首都分别是华盛顿和纽约']\n"
]
}
],
"source": [
"prompt = \"中国和美国和日本和法国和加拿大和澳大利亚的首都分别是哪里?<sep>回答:\"\n",
"inputs = tokenizer(prompt, add_special_tokens=False, return_token_type_ids=False, return_tensors=\"pt\")\n",
"outputs = model.generate(**inputs,\n",
" max_new_tokens=100,\n",
" pad_token_id=tokenizer.pad_token_id,\n",
" do_sample=False,\n",
" num_return_sequences=1,\n",
" top_p=0.8,\n",
" temperature=0.8)\n",
"results = tokenizer.batch_decode(outputs, skip_special_tokens=True)\n",
"results = [result.split(\"答:\", maxsplit=1)[1] for result in results]\n",
"print(results)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "2e5d28c2-3415-416e-817e-a596b766febe",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['中国和美国和日本和法国和加拿大和澳大利亚的首都分别是哪里?<sep>回答:美国和日本和法国和加拿大和澳大利亚的首都分别是华盛顿和纽约']"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tokenizer.batch_decode(outputs, skip_special_tokens=True)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "26acd04e-1462-49c2-b0dc-234d0a82db73",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Datawhale是一个数据库,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它是一个数据库管理系统,它']\n"
]
}
],
"source": [
"prompt = \"你知道有关datawhale的信息么<sep>回答:\"\n",
"inputs = tokenizer(prompt, add_special_tokens=False, return_token_type_ids=False, return_tensors=\"pt\")\n",
"outputs = model.generate(**inputs,\n",
" max_new_tokens=100,\n",
" pad_token_id=tokenizer.pad_token_id,\n",
" do_sample=False,\n",
" num_return_sequences=1,\n",
" top_p=0.8,\n",
" temperature=0.8)\n",
"results = tokenizer.batch_decode(outputs, skip_special_tokens=True)\n",
"results = [result.split(\"答:\", maxsplit=1)[1] for result in results]\n",
"print(results)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6c503178-b46b-445d-9555-bb529acecb47",
"metadata": {},
"outputs": [],
"source": [
"Pangu-350M经过sft,只有符合指令才会有输出.同时,数据量较少,还是不能涵盖很多问题"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,129 @@
import torch
import sentencepiece
import jieba
import numpy as np
from transformers.tokenization_utils import PreTrainedTokenizer
jieba.add_word('<s>')
jieba.add_word('</s>')
jieba.add_word('<eot>')
jieba.add_word('<unk>')
jieba.add_word('<sep>')
jieba.add_word('<pad>')
class GPTPanguTokenizer(PreTrainedTokenizer):
# Ref: https://git.openi.org.cn/PCL-Platform.Intelligence/PanGu-Alpha/src/branch/master/tokenization_jieba.py
vocab_files_names = {
"model_file": "vocab.model"
}
def __init__(
self,
model_file,
**kwargs
):
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.Load(model_file=model_file)
self.translator = str.maketrans(" \n", "\u2582\u2583")
super().__init__(**kwargs)
# special token ids
# self.eos_token_id = self.sp.piece_to_id("<eot>")
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if self.bos_token_id is not None:
if token_ids_1 is None:
return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
bos = [self.bos_token_id]
sep = [self.sep_token_id]
eos = [self.eos_token_id]
return bos + token_ids_0 + sep + token_ids_1 + eos
else:
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
sep = [self.sep_token_id]
eos = [self.eos_token_id]
return token_ids_0 + sep + token_ids_1 + eos
def tokenize(self, text, **kwargs):
""" Tokenize a string. """
seg_list = [x.translate(self.translator) for x in jieba.cut(text, cut_all=False)]
return seg_list
def convert_tokens_to_ids(self, tokens):
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
special_tokens_index = [i for i, token in enumerate(tokens) if token in self.all_special_tokens]
ids = []
i = 0
for j in special_tokens_index:
new_seg = " ".join(tokens[i:j])
ids.extend(self.sp.encode(new_seg))
ids.append(self._convert_token_to_id(tokens[j]))
i = j + 1
new_seg = " ".join(tokens[i:])
ids.extend(self.sp.encode(new_seg))
return ids
# new_seg = " ".join(tokens)
# return self.sp.encode(new_seg)
# # return tokens
def _convert_token_to_id(self, token):
return self.sp.piece_to_id(token)
def _convert_id_to_token(self, index):
return self.sp.id_to_piece(index)
def convert_ids_to_tokens(self, ids):
return self.decode(ids)
def decode(self, ids, **kwargs):
if isinstance(ids, torch.Tensor) or isinstance(ids, np.ndarray):
ids = ids.tolist()
if kwargs.get('skip_special_tokens', None) is True:
ids = [token_id for token_id in ids if token_id not in self.all_special_ids]
text = self.sp.decode(ids)
if isinstance(text, list):
text = text[0]
text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')#.replace('⁇', self.unk_token)
return text
def get_vocab(self):
vocab = {self.sp.IdToPiece(i): i for i in range(self.sp.GetPieceSize())}
return vocab
@property
def vocab_size(self) -> int:
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
return len(self.sp)
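# Hedged round-trip sketch, assuming a sentencepiece "vocab.model" file is
# available locally (vocab_files_names above expects that name):
if __name__ == "__main__":
    tokenizer = GPTPanguTokenizer(model_file="vocab.model")
    tokens = tokenizer.tokenize("中国的首都是哪里?")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokens, ids, tokenizer.decode(ids))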


@@ -0,0 +1,141 @@
import torch
import sentencepiece
import jieba
import numpy as np
from transformers.tokenization_utils import PreTrainedTokenizer
jieba.add_word('<s>')
jieba.add_word('</s>')
jieba.add_word('<eot>')
jieba.add_word('<unk>')
jieba.add_word('<sep>')
jieba.add_word('<pad>')
class GPTPanguTokenizer(PreTrainedTokenizer):
# Ref: https://git.openi.org.cn/PCL-Platform.Intelligence/PanGu-Alpha/src/branch/master/tokenization_jieba.py
vocab_files_names = {
"model_file": "vocab.model"
}
def __init__(
self,
model_file,
**kwargs
):
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.Load(model_file=model_file)
self.translator = str.maketrans(" \n", "\u2582\u2583")
super().__init__(**kwargs)
# special token ids
# self.eos_token_id = self.sp.piece_to_id("<eot>")
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if self.bos_token_id is not None:
if token_ids_1 is None:
return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
bos = [self.bos_token_id]
sep = [self.sep_token_id]
eos = [self.eos_token_id]
return bos + token_ids_0 + sep + token_ids_1 + eos
else:
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
sep = [self.sep_token_id]
eos = [self.eos_token_id]
return token_ids_0 + sep + token_ids_1 + eos
def _tokenize(self, text, **kwargs):
""" Tokenize a string. """
return self.sp.EncodeAsPieces(text)
# seg_list = [x.translate(self.translator) for x in jieba.cut(text, cut_all=False)]
# return seg_list
def _convert_token_to_id(self, token):
return self.sp.PieceToId(token)
def _convert_id_to_token(self, index):
return self.sp.IdToPiece(index)
def convert_tokens_to_ids(self, tokens):
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
special_tokens_index = [i for i, token in enumerate(tokens) if token in self.all_special_tokens]
ids = []
i = 0
for j in special_tokens_index:
new_seg = " ".join(tokens[i:j])
ids.extend(self.sp.encode(new_seg))
ids.append(self._convert_token_to_id(tokens[j]))
i = j + 1
new_seg = " ".join(tokens[i:])
ids.extend(self.sp.encode(new_seg))
return ids
# new_seg = " ".join(tokens)
# return self.sp.encode(new_seg)
# # return tokens
# def _convert_token_to_id(self, token):
# return self.sp.piece_to_id(token)
# def _convert_id_to_token(self, index):
# return self.sp.id_to_piece(index)
def convert_ids_to_tokens(self, ids):
return self.decode(ids)
def get_vocab(self):
vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
# print(dir(GPTPanguTokenizer))
# vocab = {self.sp.id_to_piece(i): i for i in range(len(self.sp))}
# return vocab
def decode(self, ids, **kwargs):
if isinstance(ids, torch.Tensor) or isinstance(ids, np.ndarray):
ids = ids.tolist()
if kwargs.get('skip_special_tokens', None) is True:
ids = [token_id for token_id in ids if token_id not in self.all_special_ids]
text = self.sp.decode(ids)
if isinstance(text, list):
text = text[0]
text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')#.replace('⁇', self.unk_token)
return text
@property
def vocab_size(self) -> int:
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
return len(self.sp)
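if __name__ == "__main__":
    # A minimal usage sketch, assuming a local PanGu-Alpha SentencePiece model at
    # "vocab.model"; the special tokens mirror the jieba registrations above.
    tokenizer = GPTPanguTokenizer(
        model_file="vocab.model",
        bos_token="<s>",
        eos_token="<eot>",
        sep_token="<sep>",
        pad_token="<pad>",
        unk_token="<unk>",
    )
    # Spaces/newlines are stored as "\u2582"/"\u2583" and restored by `decode`.
    ids = tokenizer.sp.encode("你好 世界".translate(tokenizer.translator))
    print(tokenizer.decode(ids))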

View File

@ -0,0 +1,192 @@
"""Phi-3 model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class Phi3Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the
[microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32064):
Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Phi3Model`].
hidden_size (`int`, *optional*, defaults to 3072):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by mean-pooling all the original heads within that group. For more details, check out [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
resid_pdrop (`float`, *optional*, defaults to 0.0):
Dropout probability for mlp outputs.
embd_pdrop (`int`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio after computing the attention scores.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
original_max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model was trained with. This is used to determine the size of the
original RoPE embeddings when using long scaling.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value used for the RMSNorm.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`dict`, *optional*):
The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or `yarn` and
the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
divided by the number of attention heads divided by 2.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 32000):
The id of the "end-of-sequence" token.
pad_token_id (`int`, *optional*, defaults to 32000):
The id of the padding token.
sliding_window (`int`, *optional*):
Sliding window attention window size. If `None`, no sliding window is applied.
Example:
```python
>>> from transformers import Phi3Model, Phi3Config
>>> # Initializing a Phi-3 style configuration
>>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
>>> # Initializing a model from the configuration
>>> model = Phi3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "phi3"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=32064,
hidden_size=3072,
intermediate_size=8192,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=None,
resid_pdrop=0.0,
embd_pdrop=0.0,
attention_dropout=0.0,
hidden_act="silu",
max_position_embeddings=4096,
original_max_position_embeddings=4096,
initializer_range=0.02,
rms_norm_eps=1e-5,
use_cache=True,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
bos_token_id=1,
eos_token_id=32000,
pad_token_id=32000,
sliding_window=None,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attention_dropout = attention_dropout
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.original_max_position_embeddings = original_max_position_embeddings
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
self.sliding_window = sliding_window
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
pad_token_id=pad_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def _rope_scaling_validation(self):
"""
Validate the `rope_scaling` configuration.
"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
raise ValueError(
"`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
f"got {self.rope_scaling}"
)
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["su", "yarn"]:
raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
if not (
isinstance(rope_scaling_short_factor, list)
and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
):
raise ValueError(
f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
)
if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
raise ValueError(
f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
)
if not (
isinstance(rope_scaling_long_factor, list)
and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
):
raise ValueError(
f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
)
if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
raise ValueError(
f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
)
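if __name__ == "__main__":
    # A minimal sketch of a long-context override with illustrative values: with
    # the default hidden_size=3072 and num_attention_heads=32, each factor list
    # needs 3072 // 32 // 2 == 48 entries, or `_rope_scaling_validation` raises.
    config = Phi3Config(
        max_position_embeddings=131072,
        rope_scaling={
            "type": "su",
            "short_factor": [1.0] * 48,
            "long_factor": [2.0] * 48,
        },
    )
    print(config.rope_scaling["type"], len(config.rope_scaling["long_factor"]))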

File diff suppressed because it is too large

View File

@ -0,0 +1,285 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "dd05f32c-a90f-4122-b6d7-a5ec7b3b9ba0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"env: HF_ENDPOINT=https://hf-mirror.com\n"
]
}
],
"source": [
"%env HF_ENDPOINT=https://hf-mirror.com"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "744c6db7-53f9-4911-adcb-4f0618693071",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "3fb1d88bb7d54d8d8681ab3862aa0590",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"config.json: 0%| | 0.00/476 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "325be298ec084bfb8c18d3fc60f78dc5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model.safetensors.index.json: 0%| | 0.00/832 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "05ade4e1f4d248ddbc14fe8c4431d765",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "452ca2bec6cb4fbf8fca383c43c4cc6b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model-00001-of-00002.safetensors: 0%| | 0.00/4.97G [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "483d2013cfd748d4a092c33159acaa99",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model-00002-of-00002.safetensors: 0%| | 0.00/2.67G [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "68ec36a444a14750b81c4df4168722e5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1098c0ff0d974d5caa88b0010c82ed92",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"generation_config.json: 0%| | 0.00/172 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "73bd79df1db44f738fbbdc9632f45342",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer_config.json: 0%| | 0.00/546 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "cd27d44e6f8143778af56b46f496f2b9",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer.model: 0%| | 0.00/500k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6247143bea8044b3af8cf4ea30b03ec8",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer.json: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d8fe960d384c4f49bb71b14d654d268a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"added_tokens.json: 0%| | 0.00/293 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "3a870f288de84e1f97fcd2b1b2bf3bd5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"special_tokens_map.json: 0%| | 0.00/143 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n",
"You are not running the flash-attention implementation, expect numerical differences.\n"
]
},
{
"data": {
"text/plain": [
"'\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.\\n'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from transformers import AutoTokenizer\n",
"from modeling_phi3 import Phi3ForCausalLM\n",
"\n",
"model = Phi3ForCausalLM.from_pretrained(\"microsoft/phi-3-mini-4k-instruct\")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"microsoft/phi-3-mini-4k-instruct\")\n",
"\n",
"prompt = '\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.'\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=300)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "060b86f9-fda5-4d9f-8292-4d9464c7b2ef",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial \\nintelligence. We provide a variety of online courses for different skill levels and goals. Our courses are designed to be \\nengaging, interactive, and effective, with a focus on practical application and real-world problem-solving. Whether you're \\na beginner looking to get started in AI or an experienced professional looking to expand your skills, we have something \\nfor everyone.\\n\\nOur courses cover a wide range of topics, including but not limited to:\\n\\n1. Introduction to Artificial Intelligence: Learn the basics of AI, including its history, key concepts, and real-world applications.\\n2. Machine Learning: Explore the fundamentals of machine learning, including supervised and unsupervised learning, and popular \\nalgorithms such as linear regression, decision trees, and neural networks.\\n3. Deep Learning: Dive into the world of deep learning, including neural networks, convolutional neural networks (CNNs), and \\nrecurrent neural networks (RNNs).\\n4. Natural Language Processing (NLP): Learn how to build AI systems that can understand and generate human language, including \\nsentiment analysis, language translation, and chatbots.\\n5. Computer Vision: Discover how to teach computers\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = '\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial '\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=300)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "raw",
"id": "6c0f8954-aca3-496b-86e4-843cdb00b104",
"metadata": {},
"source": [
"phi3的回复感觉还比较贴合datawhale的实际情况哈哈"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "kewei-ai",
"language": "python",
"name": "kewei-ai"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,172 @@
"""Phi model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class PhiConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate a Phi
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Phi
[microsoft/phi-1](https://huggingface.co/microsoft/phi-1).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 51200):
Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`PhiModel`].
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by mean-pooling all the original heads within that group. For more details, check out [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
resid_pdrop (`float`, *optional*, defaults to 0.0):
Dropout probability for mlp outputs.
embd_pdrop (`int`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio after computing the attention scores.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 support up to 2048
tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
is an experimental feature, subject to breaking API changes in future versions.
partial_rotary_factor (`float`, *optional*, defaults to 0.5):
Percentage of the query and keys which will have rotary embedding.
qk_layernorm (`bool`, *optional*, defaults to `False`):
Whether or not to normalize the Queries and Keys after projecting the hidden states.
bos_token_id (`int`, *optional*, defaults to 1):
Denotes beginning of sequences token id.
eos_token_id (`int`, *optional*, defaults to 2):
Denotes end of sequences token id.
Example:
```python
>>> from transformers import PhiModel, PhiConfig
>>> # Initializing a Phi-1 style configuration
>>> configuration = PhiConfig.from_pretrained("microsoft/phi-1")
>>> # Initializing a model from the configuration
>>> model = PhiModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "phi"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=51200,
hidden_size=2048,
intermediate_size=8192,
num_hidden_layers=24,
num_attention_heads=32,
num_key_value_heads=None,
resid_pdrop=0.0,
embd_pdrop=0.0,
attention_dropout=0.0,
hidden_act="gelu_new",
max_position_embeddings=2048,
initializer_range=0.02,
layer_norm_eps=1e-5,
use_cache=True,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
partial_rotary_factor=0.5,
qk_layernorm=False,
bos_token_id=1,
eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attention_dropout = attention_dropout
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.partial_rotary_factor = partial_rotary_factor
self.qk_layernorm = qk_layernorm
self._rope_scaling_validation()
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
def _rope_scaling_validation(self):
"""
Validate the `rope_scaling` configuration.
"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
)
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_factor = self.rope_scaling.get("factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
)
if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")

File diff suppressed because it is too large

View File

@ -0,0 +1,472 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "a56ef5b3-a713-4852-a547-86796e4611f6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"env: HF_ENDPOINT=https://hf-mirror.com\n"
]
}
],
"source": [
"%env HF_ENDPOINT=https://hf-mirror.com"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "fe693620-d5e3-4156-9084-9610bbc6d359",
"metadata": {},
"outputs": [],
"source": [
"from modeling_phi import PhiForCausalLM"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2646666d-b298-4b91-b4fe-ab68b3e420f8",
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoTokenizer"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e23c8612-7776-4d37-8923-0de3c27a2070",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ff5a4df0f3ee43ce804aae379d334d7d",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"config.json: 0%| | 0.00/411 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "2e17b9fb38054c608c2d8e11f44af008",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model.safetensors: 0%| | 0.00/2.84G [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d4d141ba9a2a472291fbe68d8a95039d",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"generation_config.json: 0%| | 0.00/74.0 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e69170f35e1648039b8c4c194432090f",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer_config.json: 0%| | 0.00/237 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0c70b43439344ce3b078af37281336aa",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"vocab.json: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "694eb25535f842bc8d2ce3437d5c3a50",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"merges.txt: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "2d4c4995d2354662a883aaae62ffb6a8",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer.json: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "eb6a91779dd64b52922c1f99a461b873",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"added_tokens.json: 0%| | 0.00/206 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "cec94dcfdab24948ad881e2951d616d3",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"special_tokens_map.json: 0%| | 0.00/99.0 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'This is an example script .\\n\\n\\n\\nfrom typing import List\\n\\ndef find_most_common_letter(words: List[str'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = PhiForCausalLM.from_pretrained(\"microsoft/phi-1\")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"microsoft/phi-1\")\n",
"\n",
"prompt = \"This is an example script .\"\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=30)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f89dd876-c7dd-41e6-9fc3-7f4417beacb1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.\\n\\nThe function takes in two lists:\\n- `artworks`: a list of strings representing the names of artworks\\n- `popularity`: a list of integers representing the popularity of each artwork\\n\\nThe function returns a string that lists the top three most popular artworks in descending order of popularity.\\n\\nIf there are less than three artworks in the'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = '\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.'\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=100)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1b8380e9-6ce2-4493-8b9c-4d557a1df936",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1f05d39a84ce45f7b65b9472c91fe311",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"config.json: 0%| | 0.00/415 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "4603ec33e6834b38b8ea2663b7f1f0e5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model.safetensors.index.json: 0%| | 0.00/1.68k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1894f015353c496ea363a20d76da22fc",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "c4c5e0877c7c484683872a1a7cb65d0a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model-00001-of-00002.safetensors: 0%| | 0.00/5.00G [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "f192c8dda6be410098b0e2ed351aed39",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model-00002-of-00002.safetensors: 0%| | 0.00/564M [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6673a81b9b2b4541aefba462656088c8",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "30df242b8add432e84496e4c3f99562c",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"generation_config.json: 0%| | 0.00/124 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "955729ff155d454da0bee82400ee0802",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer_config.json: 0%| | 0.00/459 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1cbc3093bb8d48aaa64a0f989f36ed71",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"vocab.json: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6eb463245f9943ae86810a78529b6261",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"merges.txt: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "cae16075fa3040899aa2fb0ce2dd0904",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"tokenizer.json: 0.00B [00:00, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "f1be0c47664149208251a3a5d207e0d7",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"added_tokens.json: 0%| | 0.00/206 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9d27adaa3716459a9f8fe06ba9a20764",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"special_tokens_map.json: 0%| | 0.00/99.0 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n",
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
]
},
{
"data": {
"text/plain": [
"'\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.\\n\\nDataWhale is a company that helps people learn about artificial intelligence. It was started by a group of people at Shanghai Jiao Tong University. They wanted to help people learn about AI and how it can be used in different ways.\\n\\nDataWhale has a special program called the DataWhale AI Lab. This program helps people learn about AI by giving them hands-on experience. They also have a special program called the DataWhale AI Lab for Industry, which helps people learn about AI in a real-world setting.\\n\\nDataWhale also has a special program called the DataWhale AI Lab for Education. This program helps teachers learn about AI so they can teach it to their students. They also have a special program called the DataWhale AI Lab for Research, which helps researchers learn about AI and how it can be used in their work.\\n\\nDataWhale is a very important organization because it helps people learn about AI. AI is a very important technology that can be used in many different ways. By learning about AI, people can use it to make their lives better and to solve problems in the world.\\n\\nTopic: <education>\\n\\nPh.D.-level essay:\\n\\nThe existence of DataWhalechina, a non-profit organization founded at Shanghai Jiao Tong University, can be attributed to'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = PhiForCausalLM.from_pretrained(\"microsoft/phi-2\")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"microsoft/phi-2\")\n",
"\n",
"prompt = '\\nDataWhalechina is an organization founded at Shanghai Jiao Tong University that helps learners learn artificial intelligence.'\n",
"inputs = tokenizer(prompt, return_tensors=\"pt\")\n",
"\n",
"# Generate\n",
"generate_ids = model.generate(inputs.input_ids, max_length=300)\n",
"tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "730f81bd-f1e3-4373-a745-f01f114d039a",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "kewei-ai",
"language": "python",
"name": "kewei-ai"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}