sharpbai committed on
Commit
0dc124f
1 Parent(s): 173078f

Upload folder using huggingface_hub

configuration_chatglm.py ADDED
@@ -0,0 +1,54 @@
+ from transformers import PretrainedConfig
+
+
+ class ChatGLMConfig(PretrainedConfig):
+     def __init__(
+         self,
+         num_layers=28,
+         padded_vocab_size=65024,
+         hidden_size=4096,
+         ffn_hidden_size=13696,
+         kv_channels=128,
+         num_attention_heads=32,
+         seq_length=2048,
+         hidden_dropout=0.0,
+         attention_dropout=0.0,
+         layernorm_epsilon=1e-5,
+         rmsnorm=True,
+         apply_residual_connection_post_layernorm=False,
+         post_layer_norm=True,
+         add_bias_linear=False,
+         add_qkv_bias=False,
+         interleaved_qkv=False,
+         bias_dropout_fusion=True,
+         multi_query_attention=False,
+         multi_query_group_num=1,
+         apply_query_key_layer_scaling=True,
+         attention_softmax_in_fp32=True,
+         fp32_residual_connection=False,
+         quantization_bit=0,
+         **kwargs
+     ):
+         self.num_layers = num_layers
+         self.padded_vocab_size = padded_vocab_size
+         self.hidden_size = hidden_size
+         self.ffn_hidden_size = ffn_hidden_size
+         self.kv_channels = kv_channels
+         self.num_attention_heads = num_attention_heads
+         self.seq_length = seq_length
+         self.hidden_dropout = hidden_dropout
+         self.attention_dropout = attention_dropout
+         self.layernorm_epsilon = layernorm_epsilon
+         self.rmsnorm = rmsnorm
+         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+         self.post_layer_norm = post_layer_norm
+         self.add_bias_linear = add_bias_linear
+         self.add_qkv_bias = add_qkv_bias
+         self.bias_dropout_fusion = bias_dropout_fusion
+         self.multi_query_attention = multi_query_attention
+         self.multi_query_group_num = multi_query_group_num
+         self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
+         self.attention_softmax_in_fp32 = attention_softmax_in_fp32
+         self.fp32_residual_connection = fp32_residual_connection
+         self.quantization_bit = quantization_bit
+         super().__init__(**kwargs)
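The configuration only declares hyperparameters; the derived sizes (per-head width, fused QKV width) are computed from it inside modeling_chatglm.py. A minimal sketch of how these fields relate, assuming the file above is importable locally as configuration_chatglm:

from configuration_chatglm import ChatGLMConfig

config = ChatGLMConfig()                                  # defaults shown in the diff above
head_dim = config.kv_channels                             # 128: hidden size per attention head
qkv_width = 3 * head_dim * config.num_attention_heads    # fused query_key_value width when multi_query_attention is off
print(config.num_layers, head_dim, qkv_width)             # 28 128 12288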
modeling_chatglm.py ADDED
@@ -0,0 +1,1107 @@
+ """ PyTorch ChatGLM model. """
+
+ import math
+ import copy
+ import warnings
+ import re
+ import sys
+
+ import torch
+ import torch.utils.checkpoint
+ import torch.nn.functional as F
+ from torch import nn
+ from torch.nn import CrossEntropyLoss, LayerNorm
+ from torch.nn.utils import skip_init
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
+
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+ from transformers.generation.logits_process import LogitsProcessor
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
+
+ from .configuration_chatglm import ChatGLMConfig
+
+ # flags required to enable jit fusion kernels
+
+ if sys.platform != 'darwin':
+     torch._C._jit_set_profiling_mode(False)
+     torch._C._jit_set_profiling_executor(False)
+     torch._C._jit_override_can_fuse_on_cpu(True)
+     torch._C._jit_override_can_fuse_on_gpu(True)
+
+ logger = logging.get_logger(__name__)
+
+ _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM2-6B"
+ _CONFIG_FOR_DOC = "ChatGLM6BConfig"
+
+ CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
+     "THUDM/chatglm2-6b",
+     # See all ChatGLM models at https://huggingface.co/models?filter=chatglm
+ ]
+
+
+ def default_init(cls, *args, **kwargs):
+     return cls(*args, **kwargs)
+
+
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
+     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+         if torch.isnan(scores).any() or torch.isinf(scores).any():
+             scores.zero_()
+             scores[..., 5] = 5e4
+         return scores
+
+
+ def split_tensor_along_last_dim(
+     tensor: torch.Tensor,
+     num_partitions: int,
+     contiguous_split_chunks: bool = False,
+ ) -> List[torch.Tensor]:
+     """Split a tensor along its last dimension.
+
+     Arguments:
+         tensor: input tensor.
+         num_partitions: number of partitions to split the tensor
+         contiguous_split_chunks: If True, make each chunk contiguous
+             in memory.
+
+     Returns:
+         A list of Tensors
+     """
+     # Get the size and dimension.
+     last_dim = tensor.dim() - 1
+     last_dim_size = tensor.size()[last_dim] // num_partitions
+     # Split.
+     tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
+     # Note: torch.split does not create contiguous tensors by default.
+     if contiguous_split_chunks:
+         return tuple(chunk.contiguous() for chunk in tensor_list)
+
+     return tensor_list
+
+
+ class RotaryEmbedding(nn.Module):
+     def __init__(self, dim, original_impl=False, device=None, dtype=None):
+         super().__init__()
+         inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device, dtype=dtype) / dim))
+         self.register_buffer("inv_freq", inv_freq)
+         self.dim = dim
+         self.original_impl = original_impl
+
+     def forward_impl(
+         self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
+     ):
+         """Enhanced Transformer with Rotary Position Embedding.
+
+         Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
+         transformers/rope/__init__.py. MIT License:
+         https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
+         """
+         # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
+         theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))
+
+         # Create position indexes `[0, 1, ..., seq_len - 1]`
+         seq_idx = torch.arange(seq_len, dtype=dtype, device=device)
+
+         # Calculate the product of position index and $\theta_i$
+         idx_theta = torch.outer(seq_idx, theta).float()
+
+         cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
+
+         # this is to mimic the behaviour of complex32, else we will get different results
+         if dtype in (torch.float16, torch.bfloat16, torch.int8):
+             cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
+         return cache
+
+     def forward(self, max_seq_len, offset=0):
+         return self.forward_impl(
+             max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device
+         )
+
+
+ @torch.jit.script
+ def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
+     # x: [sq, b, np, hn]
+     sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3)
+     rot_dim = rope_cache.shape[-2] * 2
+     x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
+     # truncate to support variable sizes
+     rope_cache = rope_cache[:sq]
+     xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2)
+     rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2)
+     x_out2 = torch.stack(
+         [
+             xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
+             xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
+         ],
+         -1,
+     )
+     x_out2 = x_out2.flatten(3)
+     return torch.cat((x_out2, x_pass), dim=-1)
+
+
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
+         super().__init__()
+         self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
+         self.eps = eps
+
+     def forward(self, hidden_states: torch.Tensor):
+         input_dtype = hidden_states.dtype
+         variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
+
+         return (self.weight * hidden_states).to(input_dtype)
+
+
+ class CoreAttention(torch.nn.Module):
+     def __init__(self, config: ChatGLMConfig, layer_number):
+         super(CoreAttention, self).__init__()
+
+         self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
+         self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
+         if self.apply_query_key_layer_scaling:
+             self.attention_softmax_in_fp32 = True
+         self.layer_number = max(1, layer_number)
+
+         projection_size = config.kv_channels * config.num_attention_heads
+
+         # Per attention head and per partition values.
+         self.hidden_size_per_partition = projection_size
+         self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
+         self.num_attention_heads_per_partition = config.num_attention_heads
+
+         coeff = None
+         self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
+         if self.apply_query_key_layer_scaling:
+             coeff = self.layer_number
+             self.norm_factor *= coeff
+         self.coeff = coeff
+
+         self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
+
+     def forward(self, query_layer, key_layer, value_layer, attention_mask):
+         pytorch_major_version = int(torch.__version__.split('.')[0])
+         if pytorch_major_version >= 2:
+             query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]]
+             if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
+                 context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
+                                                                                  is_causal=True)
+             else:
+                 if attention_mask is not None:
+                     attention_mask = ~attention_mask
+                 context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
+                                                                                  attention_mask)
+             context_layer = context_layer.permute(2, 0, 1, 3)
+             new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
+             context_layer = context_layer.reshape(*new_context_layer_shape)
+         else:
+             # Raw attention scores
+
+             # [b, np, sq, sk]
+             output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
+
+             # [sq, b, np, hn] -> [sq, b * np, hn]
+             query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
+             # [sk, b, np, hn] -> [sk, b * np, hn]
+             key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
+
+             # preallocting input tensor: [b * np, sq, sk]
+             matmul_input_buffer = torch.empty(
+                 output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
+                 device=query_layer.device
+             )
+
+             # Raw attention scores. [b * np, sq, sk]
+             matmul_result = torch.baddbmm(
+                 matmul_input_buffer,
+                 query_layer.transpose(0, 1),  # [b * np, sq, hn]
+                 key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
+                 beta=0.0,
+                 alpha=(1.0 / self.norm_factor),
+             )
+
+             # change view to [b, np, sq, sk]
+             attention_scores = matmul_result.view(*output_size)
+
+             # ===========================
+             # Attention probs and dropout
+             # ===========================
+
+             # attention scores and attention mask [b, np, sq, sk]
+             if self.attention_softmax_in_fp32:
+                 attention_scores = attention_scores.float()
+             if self.coeff is not None:
+                 attention_scores = attention_scores * self.coeff
+             if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
+                 attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
+                                             device=attention_scores.device, dtype=torch.bool)
+                 attention_mask.tril_()
+                 attention_mask = ~attention_mask
+             if attention_mask is not None:
+                 attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
+             attention_probs = F.softmax(attention_scores, dim=-1)
+             attention_probs = attention_probs.type_as(value_layer)
+
+             # This is actually dropping out entire tokens to attend to, which might
+             # seem a bit unusual, but is taken from the original Transformer paper.
+             attention_probs = self.attention_dropout(attention_probs)
+             # =========================
+             # Context layer. [sq, b, hp]
+             # =========================
+
+             # value_layer -> context layer.
+             # [sk, b, np, hn] --> [b, np, sq, hn]
+
+             # context layer shape: [b, np, sq, hn]
+             output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
+             # change view [sk, b * np, hn]
+             value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
+             # change view [b * np, sq, sk]
+             attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
+             # matmul: [b * np, sq, hn]
+             context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
+             # change view [b, np, sq, hn]
+             context_layer = context_layer.view(*output_size)
+             # [b, np, sq, hn] --> [sq, b, np, hn]
+             context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
+             # [sq, b, np, hn] --> [sq, b, hp]
+             new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
+             context_layer = context_layer.view(*new_context_layer_shape)
+
+         return context_layer
+
+
+ class SelfAttention(torch.nn.Module):
+     """Parallel self-attention layer abstract class.
+
+     Self-attention layer takes input with size [s, b, h]
+     and returns output of the same size.
+     """
+
+     def __init__(self, config: ChatGLMConfig, layer_number, device=None):
+         super(SelfAttention, self).__init__()
+         self.layer_number = max(1, layer_number)
+
+         self.projection_size = config.kv_channels * config.num_attention_heads
+
+         # Per attention head and per partition values.
+         self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
+         self.num_attention_heads_per_partition = config.num_attention_heads
+
+         self.multi_query_attention = config.multi_query_attention
+         self.qkv_hidden_size = 3 * self.projection_size
+         if self.multi_query_attention:
+             self.num_multi_query_groups_per_partition = config.multi_query_group_num
+             self.qkv_hidden_size = (
+                 self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
+             )
+         self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
+                                          bias=config.add_bias_linear or config.add_qkv_bias,
+                                          device=device, **_config_to_kwargs(config)
+                                          )
+
+         self.core_attention = CoreAttention(config, self.layer_number)
+
+         # Output.
+         self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,
+                                device=device, **_config_to_kwargs(config)
+                                )
+
+     def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):
+         if self.multi_query_attention:
+             num_attention_heads = self.num_multi_query_groups_per_partition
+         else:
+             num_attention_heads = self.num_attention_heads_per_partition
+         return torch.empty(
+             inference_max_sequence_len,
+             batch_size,
+             num_attention_heads,
+             self.hidden_size_per_attention_head,
+             dtype=dtype,
+             device=device,
+         )
+
+     def forward(
+         self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
+     ):
+         # hidden_states: [sq, b, h]
+
+         # =================================================
+         # Pre-allocate memory for key-values for inference.
+         # =================================================
+         # =====================
+         # Query, Key, and Value
+         # =====================
+
+         # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
+         mixed_x_layer = self.query_key_value(hidden_states)
+
+         if self.multi_query_attention:
+             (query_layer, key_layer, value_layer) = mixed_x_layer.split(
+                 [
+                     self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
+                     self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
+                     self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
+                 ],
+                 dim=-1,
+             )
+             query_layer = query_layer.view(
+                 query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
+             )
+             key_layer = key_layer.view(
+                 key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
+             )
+             value_layer = value_layer.view(
+                 value_layer.size()[:-1]
+                 + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
+             )
+         else:
+             new_tensor_shape = mixed_x_layer.size()[:-1] + \
+                                (self.num_attention_heads_per_partition,
+                                 3 * self.hidden_size_per_attention_head)
+             mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
+
+             # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
+             (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
+
+         # apply relative positional encoding (rotary embedding)
+         if rotary_pos_emb is not None:
+             query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)
+             key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)
+
+         # adjust key and value for inference
+         if use_cache:
+             if kv_cache is not None:
+                 cache_k, cache_v = kv_cache
+                 key_layer = torch.cat((cache_k, key_layer), dim=0)
+                 value_layer = torch.cat((cache_v, value_layer), dim=0)
+             kv_cache = (key_layer, value_layer)
+         else:
+             kv_cache = None
+
+         if self.multi_query_attention:
+             key_layer = key_layer.unsqueeze(-2)
+             key_layer = key_layer.expand(
+                 -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1
+             )
+             key_layer = key_layer.contiguous().view(
+                 key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
+             )
+             value_layer = value_layer.unsqueeze(-2)
+             value_layer = value_layer.expand(
+                 -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1
+             )
+             value_layer = value_layer.contiguous().view(
+                 value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
+             )
+
+         # ==================================
+         # core attention computation
+         # ==================================
+
+         context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)
+
+         # =================
+         # Output. [sq, b, h]
+         # =================
+
+         output = self.dense(context_layer)
+
+         return output, kv_cache
+
+
+ def _config_to_kwargs(args):
+     common_kwargs = {
+         "dtype": args.torch_dtype,
+     }
+     return common_kwargs
+
+
+ class MLP(torch.nn.Module):
+     """MLP.
+
+     MLP will take the input with h hidden state, project it to 4*h
+     hidden dimension, perform nonlinear transformation, and project the
+     state back into h hidden dimension.
+     """
+
+     def __init__(self, config: ChatGLMConfig, device=None):
+         super(MLP, self).__init__()
+
+         self.add_bias = config.add_bias_linear
+
+         # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
+         self.dense_h_to_4h = nn.Linear(
+             config.hidden_size,
+             config.ffn_hidden_size * 2,
+             bias=self.add_bias,
+             device=device,
+             **_config_to_kwargs(config)
+         )
+
+         def swiglu(x):
+             x = torch.chunk(x, 2, dim=-1)
+             return F.silu(x[0]) * x[1]
+
+         self.activation_func = swiglu
+
+         # Project back to h.
+         self.dense_4h_to_h = nn.Linear(
+             config.ffn_hidden_size,
+             config.hidden_size,
+             bias=self.add_bias,
+             device=device,
+             **_config_to_kwargs(config)
+         )
+
+     def forward(self, hidden_states):
+         # [s, b, 4hp]
+         intermediate_parallel = self.dense_h_to_4h(hidden_states)
+         intermediate_parallel = self.activation_func(intermediate_parallel)
+         # [s, b, h]
+         output = self.dense_4h_to_h(intermediate_parallel)
+         return output
+
+
+ class GLMBlock(torch.nn.Module):
+     """A single transformer layer.
+
+     Transformer layer takes input with size [s, b, h] and returns an
+     output of the same size.
+     """
+
+     def __init__(self, config: ChatGLMConfig, layer_number, device=None):
+         super(GLMBlock, self).__init__()
+         self.layer_number = layer_number
+
+         self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
+
+         self.fp32_residual_connection = config.fp32_residual_connection
+
+         LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
+         # Layernorm on the input data.
+         self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
+                                              dtype=config.torch_dtype)
+
+         # Self attention.
+         self.self_attention = SelfAttention(config, layer_number, device=device)
+         self.hidden_dropout = config.hidden_dropout
+
+         # Layernorm on the attention output
+         self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
+                                                       dtype=config.torch_dtype)
+
+         # MLP
+         self.mlp = MLP(config, device=device)
+
+     def forward(
+             self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,
+     ):
+         # hidden_states: [s, b, h]
+
+         # Layer norm at the beginning of the transformer layer.
+         layernorm_output = self.input_layernorm(hidden_states)
+         # Self attention.
+         attention_output, kv_cache = self.self_attention(
+             layernorm_output,
+             attention_mask,
+             rotary_pos_emb,
+             kv_cache=kv_cache,
+             use_cache=use_cache
+         )
+
+         # Residual connection.
+         if self.apply_residual_connection_post_layernorm:
+             residual = layernorm_output
+         else:
+             residual = hidden_states
+
+         layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
+         layernorm_input = residual + layernorm_input
+
+         # Layer norm post the self attention.
+         layernorm_output = self.post_attention_layernorm(layernorm_input)
+
+         # MLP.
+         mlp_output = self.mlp(layernorm_output)
+
+         # Second residual connection.
+         if self.apply_residual_connection_post_layernorm:
+             residual = layernorm_output
+         else:
+             residual = layernorm_input
+
+         output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
+         output = residual + output
+
+         return output, kv_cache
+
+
+ class GLMTransformer(torch.nn.Module):
+     """Transformer class."""
+
+     def __init__(self, config: ChatGLMConfig, device=None):
+         super(GLMTransformer, self).__init__()
+
+         self.fp32_residual_connection = config.fp32_residual_connection
+         self.post_layer_norm = config.post_layer_norm
+
+         # Number of layers.
+         self.num_layers = config.num_layers
+
+         # Transformer layers.
+         def build_layer(layer_number):
+             return GLMBlock(config, layer_number, device=device)
+
+         self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])
+
+         if self.post_layer_norm:
+             LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
+             # Final layer norm before output.
+             self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
+                                                  dtype=config.torch_dtype)
+
+     def _get_layer(self, layer_number):
+         return self.layers[layer_number]
+
+     def forward(
+             self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,
+             use_cache: Optional[bool] = True,
+             output_hidden_states: Optional[bool] = False,
+     ):
+         if not kv_caches:
+             kv_caches = [None for _ in range(self.num_layers)]
+         presents = () if use_cache else None
+         all_self_attentions = None
+         all_hidden_states = () if output_hidden_states else None
+         for index in range(self.num_layers):
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             layer = self._get_layer(index)
+
+             hidden_states, kv_cache = layer(
+                 hidden_states,
+                 attention_mask,
+                 rotary_pos_emb,
+                 kv_cache=kv_caches[index],
+                 use_cache=use_cache
+             )
+             if use_cache:
+                 presents = presents + (kv_cache,)
+
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         # Final layer norm.
+         if self.post_layer_norm:
+             hidden_states = self.final_layernorm(hidden_states)
+
+         return hidden_states, presents, all_hidden_states, all_self_attentions
+
+
+ class ChatGLMPreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and
+     a simple interface for downloading and loading pretrained models.
+     """
+
+     is_parallelizable = False
+     supports_gradient_checkpointing = True
+     config_class = ChatGLMConfig
+     base_model_prefix = "transformer"
+     _no_split_modules = ["GLMBlock"]
+
+     def _init_weights(self, module: nn.Module):
+         """Initialize the weights."""
+         return
+
+     def get_masks(self, input_ids, past_key_values, padding_mask=None):
+         batch_size, seq_length = input_ids.shape
+         full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
+         full_attention_mask.tril_()
+         past_length = 0
+         if past_key_values:
+             past_length = past_key_values[0][0].shape[0]
+         if past_length:
+             full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
+                                                         device=input_ids.device), full_attention_mask), dim=-1)
+         if padding_mask is not None:
+             full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
+         if not past_length and padding_mask is not None:
+             full_attention_mask -= padding_mask.unsqueeze(-1) - 1
+         full_attention_mask = (full_attention_mask < 0.5).bool()
+         full_attention_mask.unsqueeze_(1)
+         return full_attention_mask
+
+     def get_position_ids(self, input_ids, device):
+         batch_size, seq_length = input_ids.shape
+         position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
+         return position_ids
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, ChatGLMModel):
+             module.gradient_checkpointing = value
+
+
+ class Embedding(torch.nn.Module):
+     """Language model embeddings."""
+
+     def __init__(self, config: ChatGLMConfig, device=None):
+         super(Embedding, self).__init__()
+
+         self.hidden_size = config.hidden_size
+         # Word embeddings (parallel).
+         self.word_embeddings = nn.Embedding(
+             config.padded_vocab_size,
+             self.hidden_size,
+             dtype=config.torch_dtype,
+             device=device
+         )
+         self.fp32_residual_connection = config.fp32_residual_connection
+
+     def forward(self, input_ids):
+         # Embeddings.
+         words_embeddings = self.word_embeddings(input_ids)
+         embeddings = words_embeddings
+         # Data format change to avoid explicit tranposes : [b s h] --> [s b h].
+         embeddings = embeddings.transpose(0, 1).contiguous()
+         # If the input flag for fp32 residual connection is set, convert for float.
+         if self.fp32_residual_connection:
+             embeddings = embeddings.float()
+         return embeddings
+
+
+ class ChatGLMModel(ChatGLMPreTrainedModel):
+     def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
+         super().__init__(config)
+         if empty_init:
+             init_method = skip_init
+         else:
+             init_method = default_init
+         init_kwargs = {}
+         if device is not None:
+             init_kwargs["device"] = device
+         self.embedding = init_method(Embedding, config, **init_kwargs)
+
+         # Rotary positional embeddings
+         self.seq_length = config.seq_length
+         rotary_dim = (
+             config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
+         )
+
+         self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device,
+                                               dtype=config.torch_dtype)
+         self.encoder = init_method(GLMTransformer, config, **init_kwargs)
+         self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
+                                         dtype=config.torch_dtype, **init_kwargs)
+         self.gradient_checkpointing = False
+
+     def forward(
+             self,
+             input_ids,
+             position_ids: Optional[torch.Tensor] = None,
+             attention_mask: Optional[torch.BoolTensor] = None,
+             full_attention_mask: Optional[torch.BoolTensor] = None,
+             past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+             inputs_embeds: Optional[torch.Tensor] = None,
+             use_cache: Optional[bool] = None,
+             output_hidden_states: Optional[bool] = None,
+             return_dict: Optional[bool] = None,
+     ):
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         batch_size, seq_length = input_ids.shape
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embedding(input_ids)
+
+         if full_attention_mask is None:
+             if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
+                 full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
+
+         # Rotary positional embeddings
+         rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
+         if position_ids is not None:
+             rotary_pos_emb = rotary_pos_emb[position_ids]
+         else:
+             rotary_pos_emb = rotary_pos_emb[None, :seq_length]
+         rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()
+
+         # Run encoder.
+         hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
+             inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,
+             kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
+         )
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=presents,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions,
+         )
+
+     def quantize(self, weight_bit_width: int):
+         from .quantization import quantize
+         quantize(self.encoder, weight_bit_width)
+         return self
+
+
+ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
+     def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
+         super().__init__(config)
+
+         self.max_sequence_length = config.max_length
+         self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
+         self.config = config
+         self.quantized = False
+
+         if self.config.quantization_bit:
+             self.quantize(self.config.quantization_bit, empty_init=True)
+
+     def _update_model_kwargs_for_generation(
+             self,
+             outputs: ModelOutput,
+             model_kwargs: Dict[str, Any],
+             is_encoder_decoder: bool = False,
+             standardize_cache_format: bool = False,
+     ) -> Dict[str, Any]:
+         # update past_key_values
+         model_kwargs["past_key_values"] = self._extract_past_from_model_output(
+             outputs, standardize_cache_format=standardize_cache_format
+         )
+
+         # update attention mask
+         if "attention_mask" in model_kwargs:
+             attention_mask = model_kwargs["attention_mask"]
+             model_kwargs["attention_mask"] = torch.cat(
+                 [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
+             )
+
+         # update position ids
+         if "position_ids" in model_kwargs:
+             position_ids = model_kwargs["position_ids"]
+             new_position_id = position_ids[..., -1:].clone()
+             new_position_id += 1
+             model_kwargs["position_ids"] = torch.cat(
+                 [position_ids, new_position_id], dim=-1
+             )
+
+         model_kwargs["is_first_forward"] = False
+         return model_kwargs
+
+     def prepare_inputs_for_generation(
+             self,
+             input_ids: torch.LongTensor,
+             past_key_values: Optional[torch.Tensor] = None,
+             attention_mask: Optional[torch.Tensor] = None,
+             position_ids: Optional[torch.Tensor] = None,
+             is_first_forward: bool = True,
+             **kwargs
+     ) -> dict:
+         # only last token for input_ids if past is not None
+         if position_ids is None:
+             position_ids = self.get_position_ids(input_ids, device=input_ids.device)
+         if not is_first_forward:
+             position_ids = position_ids[..., -1:]
+             input_ids = input_ids[:, -1:]
+         return {
+             "input_ids": input_ids,
+             "past_key_values": past_key_values,
+             "position_ids": position_ids,
+             "attention_mask": attention_mask,
+             "return_last_logit": True
+         }
+
+     def forward(
+             self,
+             input_ids: Optional[torch.Tensor] = None,
+             position_ids: Optional[torch.Tensor] = None,
+             attention_mask: Optional[torch.Tensor] = None,
+             past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+             inputs_embeds: Optional[torch.Tensor] = None,
+             labels: Optional[torch.Tensor] = None,
+             use_cache: Optional[bool] = None,
+             output_attentions: Optional[bool] = None,
+             output_hidden_states: Optional[bool] = None,
+             return_dict: Optional[bool] = None,
+             return_last_logit: Optional[bool] = False,
+     ):
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.transformer(
+             input_ids=input_ids,
+             position_ids=position_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         hidden_states = transformer_outputs[0]
+         if return_last_logit:
+             hidden_states = hidden_states[-1:]
+         lm_logits = self.transformer.output_layer(hidden_states)
+         lm_logits = lm_logits.transpose(0, 1).contiguous()
+
+         loss = None
+         if labels is not None:
+             lm_logits = lm_logits.to(torch.float32)
+
+             # Shift so that tokens < n predict n
+             shift_logits = lm_logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss(ignore_index=-100)
+             loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+             lm_logits = lm_logits.to(hidden_states.dtype)
+             loss = loss.to(hidden_states.dtype)
+
+         if not return_dict:
+             output = (lm_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=lm_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
+     @staticmethod
+     def _reorder_cache(
+             past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
+     ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
+         """
+         This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
+         [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+         beam_idx at every generation step.
+
+         Output shares the same memory storage as `past`.
+         """
+         return tuple(
+             (
+                 layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
+                 layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
+             )
+             for layer_past in past
+         )
+
+     def process_response(self, response):
+         response = response.strip()
+         response = response.replace("[[训练时间]]", "2023年")
+         return response
+
+     def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):
+         prompt = ""
+         for i, (old_query, response) in enumerate(history):
+             prompt += "[Round {}]\n\n问:{}\n\n答:{}\n\n".format(i + 1, old_query, response)
+         prompt += "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
+         inputs = tokenizer([prompt], return_tensors="pt")
+         inputs = inputs.to(self.device)
+         return inputs
+
+     def build_stream_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):
+         if history:
+             prompt = "\n\n[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
+             input_ids = tokenizer.encode(prompt, add_special_tokens=False)
+             input_ids = input_ids[1:]
+             inputs = tokenizer.batch_encode_plus([(input_ids, None)], return_tensors="pt", add_special_tokens=False)
+         else:
+             prompt = "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
+             inputs = tokenizer([prompt], return_tensors="pt")
+         inputs = inputs.to(self.device)
+         return inputs
+
+
+     @torch.no_grad()
+     def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
+              do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, **kwargs):
+         if history is None:
+             history = []
+         if logits_processor is None:
+             logits_processor = LogitsProcessorList()
+         logits_processor.append(InvalidScoreLogitsProcessor())
+         gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
+                       "temperature": temperature, "logits_processor": logits_processor, **kwargs}
+         inputs = self.build_inputs(tokenizer, query, history=history)
+         outputs = self.generate(**inputs, **gen_kwargs)
+         outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
+         response = tokenizer.decode(outputs)
+         response = self.process_response(response)
+         history = history + [(query, response)]
+         return response, history
+
+     @torch.no_grad()
+     def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None,
+                     max_length: int = 2048, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,
+                     return_past_key_values=False, **kwargs):
+         if history is None:
+             history = []
+         if logits_processor is None:
+             logits_processor = LogitsProcessorList()
+         logits_processor.append(InvalidScoreLogitsProcessor())
+         gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
+                       "temperature": temperature, "logits_processor": logits_processor, **kwargs}
+         if past_key_values is None and not return_past_key_values:
+             inputs = self.build_inputs(tokenizer, query, history=history)
+         else:
+             inputs = self.build_stream_inputs(tokenizer, query, history=history)
+         if past_key_values is not None:
+             past_length = past_key_values[0][0].shape[0]
+             inputs.position_ids += past_length
+             attention_mask = inputs.attention_mask
+             attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)
+             inputs['attention_mask'] = attention_mask
+         for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,
+                                             return_past_key_values=return_past_key_values, **gen_kwargs):
+             if return_past_key_values:
+                 outputs, past_key_values = outputs
+             outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
+             response = tokenizer.decode(outputs)
+             response = self.process_response(response)
+             new_history = history + [(query, response)]
+             if return_past_key_values:
+                 yield response, new_history, past_key_values
+             else:
+                 yield response, new_history
+
+     @torch.no_grad()
+     def stream_generate(
+             self,
+             input_ids,
+             generation_config: Optional[GenerationConfig] = None,
+             logits_processor: Optional[LogitsProcessorList] = None,
+             stopping_criteria: Optional[StoppingCriteriaList] = None,
+             prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
+             return_past_key_values=False,
+             **kwargs,
+     ):
+         batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
+
+         if generation_config is None:
+             generation_config = self.generation_config
+         generation_config = copy.deepcopy(generation_config)
+         model_kwargs = generation_config.update(**kwargs)
+         bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
+
+         if isinstance(eos_token_id, int):
+             eos_token_id = [eos_token_id]
+
+         has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+         if has_default_max_length and generation_config.max_new_tokens is None:
+             warnings.warn(
+                 f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
+                 "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
+                 " recommend using `max_new_tokens` to control the maximum length of the generation.",
+                 UserWarning,
+             )
+         elif generation_config.max_new_tokens is not None:
+             generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
+             if not has_default_max_length:
+                 logger.warn(
+                     f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
+                     f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
+                     "Please refer to the documentation for more information. "
+                     "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
+                     UserWarning,
+                 )
+
+         if input_ids_seq_length >= generation_config.max_length:
+             input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
+             logger.warning(
+                 f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
+                 f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
+                 " increasing `max_new_tokens`."
+             )
+
+         # 2. Set generation parameters if not already defined
+         logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+         stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+
+         logits_processor = self._get_logits_processor(
+             generation_config=generation_config,
+             input_ids_seq_length=input_ids_seq_length,
+             encoder_input_ids=input_ids,
+             prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+             logits_processor=logits_processor,
+         )
+
+         stopping_criteria = self._get_stopping_criteria(
+             generation_config=generation_config, stopping_criteria=stopping_criteria
+         )
+         logits_warper = self._get_logits_warper(generation_config)
+
+         unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
+         scores = None
+         while True:
+             model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+             # forward pass to get next token
+             outputs = self(
+                 **model_inputs,
+                 return_dict=True,
+                 output_attentions=False,
+                 output_hidden_states=False,
+             )
+
+             next_token_logits = outputs.logits[:, -1, :]
+
+             # pre-process distribution
+             next_token_scores = logits_processor(input_ids, next_token_logits)
+             next_token_scores = logits_warper(input_ids, next_token_scores)
+
+             # sample
+             probs = nn.functional.softmax(next_token_scores, dim=-1)
+             if generation_config.do_sample:
+                 next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
+             else:
+                 next_tokens = torch.argmax(probs, dim=-1)
+
+             # update generated ids, model inputs, and length for next step
+             input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+             model_kwargs = self._update_model_kwargs_for_generation(
+                 outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
+             )
+             unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())
+             if return_past_key_values:
+                 yield input_ids, outputs.past_key_values
+             else:
+                 yield input_ids
+             # stop when each sentence is finished, or if we exceed the maximum length
+             if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
+                 break
+
+     def quantize(self, bits: int, empty_init=False, device=None, **kwargs):
+         if bits == 0:
+             return
+
+         from .quantization import quantize
+
+         if self.quantized:
+             logger.info("Already quantized.")
+             return self
+
+         self.quantized = True
+
+         self.config.quantization_bit = bits
+
+         self.transformer.encoder = quantize(self.transformer.encoder, bits, empty_init=empty_init, device=device,
+                                             **kwargs)
+         return self
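The two files above form the remote-code path that transformers picks up with trust_remote_code=True. A minimal usage sketch, assuming the model id is the upstream checkpoint referenced in the file ("THUDM/chatglm2-6b") or this repository, and that a CUDA device is available:

from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
model = model.eval()

# chat() builds the "[Round N]\n\n问:...\n\n答:" prompt, runs generate(), and post-processes the reply.
response, history = model.chat(tokenizer, "Hello", history=[])
print(response)

# stream_chat() yields progressively longer responses while generation is running.
for response, history in model.stream_chat(tokenizer, "Hello", history=history):
    pass
print(response)

# Optional: in-place weight quantization via quantization.py (requires cpm_kernels and a GPU).
# model = model.quantize(4)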
quantization.py ADDED
@@ -0,0 +1,188 @@
+ from torch.nn import Linear
+ from torch.nn.parameter import Parameter
+
+ import bz2
+ import torch
+ import base64
+ import ctypes
+ from transformers.utils import logging
+
+ from typing import List
+ from functools import partial
+
+ logger = logging.get_logger(__name__)
+
+ try:
+     from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
+
+     class Kernel:
+         def __init__(self, code: bytes, function_names: List[str]):
+             self.code = code
+             self._function_names = function_names
+             self._cmodule = LazyKernelCModule(self.code)
+
+             for name in self._function_names:
+                 setattr(self, name, KernelFunction(self._cmodule, name))
+
+ quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+t
e+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyH
pix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
28
+
29
+ kernels = Kernel(
30
+ bz2.decompress(base64.b64decode(quantization_code)),
31
+ [
32
+ "int4WeightCompression",
33
+ "int4WeightExtractionFloat",
34
+ "int4WeightExtractionHalf",
35
+ "int8WeightExtractionFloat",
36
+ "int8WeightExtractionHalf",
37
+ ],
38
+ )
39
+ except Exception as exception:
40
+ kernels = None
41
+ logger.warning("Failed to load cpm_kernels:" + str(exception))
42
+
43
+
44
+ class W8A16Linear(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
47
+ ctx.inp_shape = inp.size()
48
+ ctx.weight_bit_width = weight_bit_width
49
+ out_features = quant_w.size(0)
50
+ inp = inp.contiguous().view(-1, inp.size(-1))
51
+ weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
52
+ ctx.weight_shape = weight.size()
53
+ output = inp.mm(weight.t())
54
+ ctx.save_for_backward(inp, quant_w, scale_w)
55
+ return output.view(*(ctx.inp_shape[:-1] + (out_features,)))
56
+
57
+ @staticmethod
58
+ def backward(ctx, grad_output: torch.Tensor):
59
+ inp, quant_w, scale_w = ctx.saved_tensors
60
+ weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
61
+ grad_output = grad_output.contiguous().view(-1, weight.size(0))
62
+ grad_input = grad_output.mm(weight)
63
+ grad_weight = grad_output.t().mm(inp)
64
+ return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
65
+
66
+
67
+ def compress_int4_weight(weight: torch.Tensor): # (n, m)
68
+ with torch.cuda.device(weight.device):
69
+ n, m = weight.size(0), weight.size(1)
70
+ assert m % 2 == 0
71
+ m = m // 2
72
+ out = torch.empty(n, m, dtype=torch.int8, device="cuda")
73
+ stream = torch.cuda.current_stream()
74
+
75
+ gridDim = (n, 1, 1)
76
+ blockDim = (min(round_up(m, 32), 1024), 1, 1)
77
+
78
+ kernels.int4WeightCompression(
79
+ gridDim,
80
+ blockDim,
81
+ 0,
82
+ stream,
83
+ [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
84
+ )
85
+ return out
86
+
87
+
88
+ def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
89
+ assert scale_list.dtype in [torch.half, torch.bfloat16]
90
+ assert weight.dtype in [torch.int8]
91
+ if source_bit_width == 8:
92
+ return weight.to(scale_list.dtype) * scale_list[:, None]
93
+ elif source_bit_width == 4:
94
+ func = (
95
+ kernels.int4WeightExtractionHalf if scale_list.dtype == torch.half else kernels.int4WeightExtractionBFloat16
96
+ )
97
+ else:
98
+ assert False, "Unsupported bit-width"
99
+
100
+ with torch.cuda.device(weight.device):
101
+ n, m = weight.size(0), weight.size(1)
102
+ out = torch.empty(n, m * (8 // source_bit_width), dtype=scale_list.dtype, device="cuda")
103
+ stream = torch.cuda.current_stream()
104
+
105
+ gridDim = (n, 1, 1)
106
+ blockDim = (min(round_up(m, 32), 1024), 1, 1)
107
+
108
+ func(
109
+ gridDim,
110
+ blockDim,
111
+ 0,
112
+ stream,
113
+ [
114
+ ctypes.c_void_p(weight.data_ptr()),
115
+ ctypes.c_void_p(scale_list.data_ptr()),
116
+ ctypes.c_void_p(out.data_ptr()),
117
+ ctypes.c_int32(n),
118
+ ctypes.c_int32(m),
119
+ ],
120
+ )
121
+ return out
122
+
123
+
124
+ class QuantizedLinear(torch.nn.Module):
125
+ def __init__(self, weight_bit_width: int, weight, bias=None, device="cpu", dtype=None, empty_init=False, *args,
126
+ **kwargs):
127
+ super().__init__()
128
+ self.weight_bit_width = weight_bit_width
129
+
130
+ shape = weight.shape
131
+
132
+ if weight is None or empty_init:
133
+ self.weight = torch.empty(shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=device)
134
+ self.weight_scale = torch.empty(shape[0], dtype=dtype, device=device)
135
+ else:
136
+ self.weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)
137
+ self.weight = torch.round(weight / self.weight_scale[:, None]).to(torch.int8)
138
+ if weight_bit_width == 4:
139
+ self.weight = compress_int4_weight(self.weight)
140
+
141
+ self.weight = Parameter(self.weight.to(device), requires_grad=False)
142
+ self.weight_scale = Parameter(self.weight_scale.to(device), requires_grad=False)
143
+ self.bias = Parameter(bias.to(device), requires_grad=False) if bias is not None else None
144
+
145
+ def forward(self, input):
146
+ output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
147
+ if self.bias is not None:
148
+ output = output + self.bias
149
+ return output
150
+
151
+
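For reference, QuantizedLinear above applies per-output-row symmetric quantization: each row of the fp16 weight is scaled by its absolute maximum over the signed integer range and rounded to int8. A minimal standalone illustration of that round-trip (not part of the uploaded file; the tensor values are made up):

    import torch

    w = torch.tensor([[0.5, -1.0, 0.25]])                         # one output row of an fp16 weight
    bits = 8
    scale = w.abs().max(dim=-1).values / (2 ** (bits - 1) - 1)    # per-row scale, here 1/127
    q = torch.round(w / scale[:, None]).to(torch.int8)            # [[64, -127, 32]]
    w_hat = q.to(w.dtype) * scale[:, None]                        # dequantized approximation used at forward time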
152
+ def quantize(model, weight_bit_width, empty_init=False, device=None):
153
+ """Replace fp16 linear with quantized linear"""
154
+ for layer in model.layers:
155
+ layer.self_attention.query_key_value = QuantizedLinear(
156
+ weight_bit_width=weight_bit_width,
157
+ weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()),
158
+ bias=layer.self_attention.query_key_value.bias,
159
+ dtype=layer.self_attention.query_key_value.weight.dtype,
160
+ device=layer.self_attention.query_key_value.weight.device if device is None else device,
161
+ empty_init=empty_init
162
+ )
163
+ layer.self_attention.dense = QuantizedLinear(
164
+ weight_bit_width=weight_bit_width,
165
+ weight=layer.self_attention.dense.weight.to(torch.cuda.current_device()),
166
+ bias=layer.self_attention.dense.bias,
167
+ dtype=layer.self_attention.dense.weight.dtype,
168
+ device=layer.self_attention.dense.weight.device if device is None else device,
169
+ empty_init=empty_init
170
+ )
171
+ layer.mlp.dense_h_to_4h = QuantizedLinear(
172
+ weight_bit_width=weight_bit_width,
173
+ weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
174
+ bias=layer.mlp.dense_h_to_4h.bias,
175
+ dtype=layer.mlp.dense_h_to_4h.weight.dtype,
176
+ device=layer.mlp.dense_h_to_4h.weight.device if device is None else device,
177
+ empty_init=empty_init
178
+ )
179
+ layer.mlp.dense_4h_to_h = QuantizedLinear(
180
+ weight_bit_width=weight_bit_width,
181
+ weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
182
+ bias=layer.mlp.dense_4h_to_h.bias,
183
+ dtype=layer.mlp.dense_4h_to_h.weight.dtype,
184
+ device=layer.mlp.dense_4h_to_h.weight.device if device is None else device,
185
+ empty_init=empty_init
186
+ )
187
+
188
+ return model
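A minimal usage sketch for the quantize() helper above, assuming the model has already been loaded in fp16 on a CUDA device (the 4-bit path additionally needs the cpm_kernels kernels loaded above). The checkpoint name and the transformer.encoder attribute path are illustrative assumptions, not taken from this commit:

    import torch
    from transformers import AutoModel

    model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
    encoder = model.transformer.encoder      # assumption: the module exposing the .layers list quantize() iterates
    quantize(encoder, weight_bit_width=4)    # replaces each fp16 linear with a 4-bit QuantizedLinear in place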
tokenization_chatglm.py ADDED
@@ -0,0 +1,236 @@
1
+ import os
2
+ import torch
3
+ from typing import List, Optional, Union, Dict
4
+ from sentencepiece import SentencePieceProcessor
5
+ from transformers import PreTrainedTokenizer
6
+ from transformers.utils import logging, PaddingStrategy
7
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
8
+
9
+
10
+ class SPTokenizer:
11
+ def __init__(self, model_path: str):
12
+ # reload tokenizer
13
+ assert os.path.isfile(model_path), model_path
14
+ self.sp_model = SentencePieceProcessor(model_file=model_path)
15
+
16
+ # BOS / EOS token IDs
17
+ self.n_words: int = self.sp_model.vocab_size()
18
+ self.bos_id: int = self.sp_model.bos_id()
19
+ self.eos_id: int = self.sp_model.eos_id()
20
+ self.pad_id: int = self.sp_model.eos_id()
21
+ assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
22
+
23
+ special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"]
24
+ self.special_tokens = {}
25
+ self.index_special_tokens = {}
26
+ for token in special_tokens:
27
+ self.special_tokens[token] = self.n_words
28
+ self.index_special_tokens[self.n_words] = token
29
+ self.n_words += 1
30
+
31
+ def tokenize(self, s: str):
32
+ return self.sp_model.EncodeAsPieces(s)
33
+
34
+ def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
35
+ assert type(s) is str
36
+ t = self.sp_model.encode(s)
37
+ if bos:
38
+ t = [self.bos_id] + t
39
+ if eos:
40
+ t = t + [self.eos_id]
41
+ return t
42
+
43
+ def decode(self, t: List[int]) -> str:
44
+ return self.sp_model.decode(t)
45
+
46
+ def decode_tokens(self, tokens: List[str]) -> str:
47
+ text = self.sp_model.DecodePieces(tokens)
48
+ return text
49
+
50
+ def convert_token_to_id(self, token):
51
+ """ Converts a token (str) to an id using the vocab. """
52
+ if token in self.special_tokens:
53
+ return self.special_tokens[token]
54
+ return self.sp_model.PieceToId(token)
55
+
56
+ def convert_id_to_token(self, index):
57
+ """Converts an index (integer) to a token (str) using the vocab."""
58
+ if index in self.index_special_tokens:
59
+ return ""
60
+ return self.sp_model.IdToPiece(index)
61
+
62
+
63
+ class ChatGLMTokenizer(PreTrainedTokenizer):
64
+ vocab_files_names = {"vocab_file": "tokenizer.model"}
65
+
66
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
67
+
68
+ def __init__(self, vocab_file, padding_side="left", **kwargs):
69
+ super().__init__(padding_side=padding_side, **kwargs)
70
+ self.name = "GLMTokenizer"
71
+
72
+ self.tokenizer = SPTokenizer(vocab_file)
73
+ self.vocab_file = vocab_file
74
+ self.special_tokens = {
75
+ "<bos>": self.tokenizer.bos_id,
76
+ "<eos>": self.tokenizer.eos_id,
77
+ "<pad>": self.tokenizer.pad_id
78
+ }
79
+
80
+ def get_command(self, token):
81
+ if token in self.special_tokens:
82
+ return self.special_tokens[token]
83
+ assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}"
84
+ return self.tokenizer.special_tokens[token]
85
+
86
+ @property
87
+ def pad_token(self) -> str:
88
+ return "</s>"
89
+
90
+ @property
91
+ def pad_token_id(self):
92
+ return self.get_command("<pad>")
93
+
94
+ @property
95
+ def vocab_size(self):
96
+ return self.tokenizer.n_words
97
+
98
+ def get_vocab(self):
99
+ """ Returns vocab as a dict """
100
+ vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
101
+ vocab.update(self.added_tokens_encoder)
102
+ return vocab
103
+
104
+ def _tokenize(self, text, **kwargs):
105
+ return self.tokenizer.tokenize(text)
106
+
107
+ def _convert_token_to_id(self, token):
108
+ """ Converts a token (str) to an id using the vocab. """
109
+ return self.tokenizer.convert_token_to_id(token)
110
+
111
+ def _convert_id_to_token(self, index):
112
+ """Converts an index (integer) to a token (str) using the vocab."""
113
+ return self.tokenizer.convert_id_to_token(index)
114
+
115
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
116
+ return self.tokenizer.decode_tokens(tokens)
117
+
118
+ def save_vocabulary(self, save_directory, filename_prefix=None):
119
+ """
120
+ Save the vocabulary and special tokens file to a directory.
121
+
122
+ Args:
123
+ save_directory (`str`):
124
+ The directory in which to save the vocabulary.
125
+ filename_prefix (`str`, *optional*):
126
+ An optional prefix to add to the names of the saved files.
127
+
128
+ Returns:
129
+ `Tuple(str)`: Paths to the files saved.
130
+ """
131
+ if os.path.isdir(save_directory):
132
+ vocab_file = os.path.join(
133
+ save_directory, self.vocab_files_names["vocab_file"]
134
+ )
135
+ else:
136
+ vocab_file = save_directory
137
+
138
+ with open(self.vocab_file, 'rb') as fin:
139
+ proto_str = fin.read()
140
+
141
+ with open(vocab_file, "wb") as writer:
142
+ writer.write(proto_str)
143
+
144
+ return (vocab_file,)
145
+
146
+ def get_prefix_tokens(self):
147
+ prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")]
148
+ return prefix_tokens
149
+
150
+ def build_inputs_with_special_tokens(
151
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
152
+ ) -> List[int]:
153
+ """
154
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
155
+ adding special tokens. A BERT sequence has the following format:
156
+
157
+ - single sequence: `[CLS] X [SEP]`
158
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
159
+
160
+ Args:
161
+ token_ids_0 (`List[int]`):
162
+ List of IDs to which the special tokens will be added.
163
+ token_ids_1 (`List[int]`, *optional*):
164
+ Optional second list of IDs for sequence pairs.
165
+
166
+ Returns:
167
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
168
+ """
169
+ prefix_tokens = self.get_prefix_tokens()
170
+ token_ids_0 = prefix_tokens + token_ids_0
171
+ if token_ids_1 is not None:
172
+ token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")]
173
+ return token_ids_0
174
+
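Given the prefix tokens above, a pair of sequences passed through build_inputs_with_special_tokens comes out as [gMASK] sop A B <eos>, and a single sequence as [gMASK] sop X. A small sketch, assuming a ChatGLMTokenizer instance named tokenizer has already been constructed from a local tokenizer.model file:

    ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
    # -> [gMASK id, sop id, 10, 11, 20, 21, <eos> id]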
175
+ def _pad(
176
+ self,
177
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
178
+ max_length: Optional[int] = None,
179
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
180
+ pad_to_multiple_of: Optional[int] = None,
181
+ return_attention_mask: Optional[bool] = None,
182
+ ) -> dict:
183
+ """
184
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
185
+
186
+ Args:
187
+ encoded_inputs:
188
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
189
+ max_length: maximum length of the returned list and optionally padding length (see below).
190
+ Will truncate by taking into account the special tokens.
191
+ padding_strategy: PaddingStrategy to use for padding.
192
+
193
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
194
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
195
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
196
+ The tokenizer padding sides are defined in self.padding_side:
197
+
198
+ - 'left': pads on the left of the sequences
199
+ - 'right': pads on the right of the sequences
200
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
201
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
202
+ `>= 7.5` (Volta).
203
+ return_attention_mask:
204
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
205
+ """
206
+ # Load from model defaults
207
+ assert self.padding_side == "left"
208
+
209
+ required_input = encoded_inputs[self.model_input_names[0]]
210
+ seq_length = len(required_input)
211
+
212
+ if padding_strategy == PaddingStrategy.LONGEST:
213
+ max_length = len(required_input)
214
+
215
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
216
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
217
+
218
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
219
+
220
+ # Initialize attention mask if not present.
221
+ if "attention_mask" not in encoded_inputs:
222
+ encoded_inputs["attention_mask"] = [1] * seq_length
223
+
224
+ if "position_ids" not in encoded_inputs:
225
+ encoded_inputs["position_ids"] = list(range(seq_length))
226
+
227
+ if needs_to_be_padded:
228
+ difference = max_length - len(required_input)
229
+
230
+ if "attention_mask" in encoded_inputs:
231
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
232
+ if "position_ids" in encoded_inputs:
233
+ encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
234
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
235
+
236
+ return encoded_inputs
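Taken together, the left-padding logic above pads input_ids with the pad token id and prepends zeros to both attention_mask and position_ids. A hedged end-to-end sketch (the checkpoint name is illustrative and assumes the tokenizer files are published alongside this code):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
    batch = tokenizer(["hello", "a longer example sentence"], padding=True)
    # The shorter sequence is padded on the left: its attention_mask begins with 0s,
    # its position_ids begin with 0s, and its input_ids begin with the pad token id.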