HMinions committed on
Commit
70023a5
1 Parent(s): 9fe2b18

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -0
app.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import platform
import signal
from transformers import AutoTokenizer, AutoModel

# Load the ChatGLM-6B tokenizer and model from the Hugging Face hub.
# trust_remote_code=True runs the custom modeling code shipped in that repo.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# Half precision on GPU; assumes a CUDA device is available — TODO confirm.
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()

# Terminal clear command for the current OS ('cls' on Windows, 'clear' elsewhere).
os_name = platform.system()
clear_command = 'cls' if os_name == 'Windows' else 'clear'
# Set to True by the SIGINT handler to interrupt the streaming response.
stop_stream = False
14
+
15
def build_prompt(history):
    """Render the chat history as a printable transcript.

    Starts with the welcome banner, then appends one user/model
    exchange per (query, response) pair in *history*.
    """
    parts = ["欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序"]
    for user_turn, model_turn in history:
        parts.append(f"\n\n用户:{user_turn}")
        parts.append(f"\n\nChatGLM-6B:{model_turn}")
    return "".join(parts)
21
+
22
+
23
def signal_handler(sig, frame):
    """SIGINT (Ctrl-C) handler: ask the streaming loop to stop.

    Fix: the first parameter was named ``signal``, shadowing the
    imported ``signal`` module inside the handler. Renamed to ``sig``;
    signal handlers are invoked positionally, so callers are unaffected.

    Args:
        sig: The signal number delivered by the OS (unused).
        frame: The current stack frame at delivery time (unused).
    """
    global stop_stream
    stop_stream = True
26
+
27
+
28
def main():
    """Run an interactive terminal chat session with ChatGLM-6B.

    Commands typed at the prompt:
      * ``stop``  -- exit the program.
      * ``clear`` -- reset the history and clear the screen.
    Any other input is streamed through ``model.stream_chat`` and the
    full transcript is redrawn every 8 streamed chunks.

    Fix: the SIGINT handler was re-registered inside the streaming loop
    (on every 8th chunk); it is now installed once, before the loop.
    """
    welcome = "欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序"
    history = []
    global stop_stream
    # Install the Ctrl-C handler once, up front.
    signal.signal(signal.SIGINT, signal_handler)
    print(welcome)
    while True:
        query = input("\n用户:")
        if query.strip() == "stop":
            break
        if query.strip() == "clear":
            history = []
            os.system(clear_command)
            print(welcome)
            continue
        count = 0
        for response, history in model.stream_chat(tokenizer, query, history=history):
            if stop_stream:
                # SIGINT arrived mid-stream: abort this response but keep
                # the program running for the next prompt.
                stop_stream = False
                break
            count += 1
            # Redraw the whole transcript every 8 chunks to limit flicker.
            if count % 8 == 0:
                os.system(clear_command)
                print(build_prompt(history), flush=True)
        # Final redraw with the complete response.
        os.system(clear_command)
        print(build_prompt(history), flush=True)


if __name__ == "__main__":
    main()