zxdu20 committed
Commit 9d1509a
1 Parent(s): bcb053b

Fix default history argument

Files changed (1): modeling_chatglm.py (+4, -2)
modeling_chatglm.py CHANGED
@@ -1077,8 +1077,10 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
         )
 
     @torch.no_grad()
-    def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], max_length: int = 2048, num_beams=1,
+    def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
              do_sample=True, top_p=0.7, temperature=0.95, **kwargs):
+        if history is None:
+            history = []
         gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
                       "temperature": temperature, **kwargs}
         if not history:
@@ -1095,7 +1097,7 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
         response = tokenizer.decode(outputs)
         response = response.strip()
         response = response.replace("[[训练时间]]", "2023年")
-        history.append((query, response))
+        history = history + [(query, response)]
         return response, history
 
     @torch.no_grad()
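
The diff replaces the mutable default argument (`history=[]`) with a `None` sentinel and builds a new list instead of calling `append` on the argument. A minimal sketch of why that matters, using hypothetical `chat_buggy`/`chat_fixed` stand-ins rather than the actual ChatGLM method: a default list is created once at function definition time and shared across calls, so earlier conversations leak into later ones, and in-place `append` also mutates any history list the caller passed in.

    # Hypothetical illustration of the pitfall this commit fixes (not ChatGLM code).

    def chat_buggy(query, history=[]):
        # The same list object is reused for every call that omits `history`.
        history.append((query, f"echo: {query}"))
        return history

    def chat_fixed(query, history=None):
        # None sentinel: a fresh list is created per call when none is given.
        if history is None:
            history = []
        # Building a new list also leaves the caller's list untouched.
        history = history + [(query, f"echo: {query}")]
        return history

    print(chat_buggy("hi"))     # [('hi', 'echo: hi')]
    print(chat_buggy("again"))  # [('hi', ...), ('again', ...)]  <- stale state from the first call
    print(chat_fixed("hi"))     # [('hi', 'echo: hi')]
    print(chat_fixed("again"))  # [('again', 'echo: again')]     <- clean on every call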