MoDongbao committed on
Commit 77abb0e
Parent: 08555a3

Upload 115 files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. .gitignore +3 -0
  2. check_proxy.py +16 -6
  3. config.py +24 -9
  4. core_functional.py +7 -0
  5. crazy_functional.py +58 -18
  6. crazy_functions/crazy_functions_test.py +0 -16
  7. crazy_functions/crazy_utils.py +49 -7
  8. crazy_functions/图片生成.py +67 -0
  9. crazy_functions/对话历史存档.py +106 -5
  10. crazy_functions/总结word文档.py +1 -1
  11. crazy_functions/总结音视频.py +184 -0
  12. crazy_functions/批量Markdown翻译.py +38 -13
  13. crazy_functions/批量总结PDF文档.py +2 -2
  14. crazy_functions/批量翻译PDF文档_多线程.py +94 -9
  15. crazy_functions/解析JupyterNotebook.py +8 -2
  16. crazy_functions/解析项目源代码.py +33 -5
  17. crazy_functions/询问多个大语言模型.py +1 -0
  18. crazy_functions/谷歌检索小助手.py +27 -21
  19. docker-compose.yml +104 -0
  20. docs/Dockerfile+ChatGLM +7 -4
  21. docs/Dockerfile+JittorLLM +59 -0
  22. docs/GithubAction+ChatGLM+Moss +30 -0
  23. docs/GithubAction+JittorLLMs +34 -0
  24. docs/GithubAction+NoLocal +20 -0
  25. docs/self_analysis.md +1 -1
  26. docs/test_markdown_format.py +130 -0
  27. docs/translate_english.json +0 -0
  28. docs/translate_japanese.json +0 -0
  29. docs/translate_traditionalchinese.json +1515 -0
  30. docs/waifu_plugin/autoload.js +30 -0
  31. docs/waifu_plugin/flat-ui-icons-regular.eot +0 -0
  32. docs/waifu_plugin/flat-ui-icons-regular.svg +126 -0
  33. docs/waifu_plugin/flat-ui-icons-regular.ttf +0 -0
  34. docs/waifu_plugin/flat-ui-icons-regular.woff +0 -0
  35. docs/waifu_plugin/jquery-ui.min.js +0 -0
  36. docs/waifu_plugin/jquery.min.js +4 -0
  37. docs/waifu_plugin/live2d.js +0 -0
  38. docs/waifu_plugin/source +1 -0
  39. docs/waifu_plugin/waifu-tips.js +405 -0
  40. docs/waifu_plugin/waifu-tips.json +116 -0
  41. docs/waifu_plugin/waifu.css +290 -0
  42. main.py +6 -7
  43. multi_language.py +499 -0
  44. request_llm/README.md +25 -0
  45. request_llm/bridge_all.py +86 -6
  46. request_llm/bridge_chatglm.py +25 -7
  47. request_llm/bridge_chatgpt.py +21 -10
  48. request_llm/bridge_jittorllms_llama.py +178 -0
  49. request_llm/bridge_jittorllms_pangualpha.py +178 -0
  50. request_llm/bridge_jittorllms_rwkv.py +178 -0
.gitignore CHANGED
@@ -146,3 +146,6 @@ debug*
 private*
 crazy_functions/test_project/pdf_and_word
 crazy_functions/test_samples
+request_llm/jittorllms
+multi-language
+request_llm/moss
check_proxy.py CHANGED
@@ -56,22 +56,24 @@ def patch_and_restart(path):
     """
     一键更新协议:覆盖和重启
     """
-    import distutils
+    from distutils import dir_util
     import shutil
     import os
     import sys
     import time
+    import glob
     from colorful import print亮黄, print亮绿, print亮红
     # if not using config_private, move origin config.py as config_private.py
     if not os.path.exists('config_private.py'):
         print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
                '另外您可以随时在history子文件夹下找回旧版的程序。')
         shutil.copyfile('config.py', 'config_private.py')
-    distutils.dir_util.copy_tree(path+'/chatgpt_academic-master', './')
-    import subprocess
+    path_new_version = glob.glob(path + '/*-master')[0]
+    dir_util.copy_tree(path_new_version, './')
     print亮绿('代码已经更新,即将更新pip包依赖……')
     for i in reversed(range(5)): time.sleep(1); print(i)
     try:
+        import subprocess
         subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
     except:
         print亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
@@ -92,7 +94,7 @@ def get_current_version():
     return current_version


-def auto_update():
+def auto_update(raise_error=False):
     """
     一键更新协议:查询版本和用户意见
     """
@@ -124,14 +126,22 @@ def auto_update():
                 try:
                     patch_and_restart(path)
                 except:
-                    print('更新失败。')
+                    msg = '更新失败。'
+                    if raise_error:
+                        from toolbox import trimmed_format_exc
+                        msg += trimmed_format_exc()
+                    print(msg)
             else:
                 print('自动更新程序:已禁用')
                 return
         else:
             return
     except:
-        print('自动更新程序:已禁用')
+        msg = '自动更新程序:已禁用'
+        if raise_error:
+            from toolbox import trimmed_format_exc
+            msg += trimmed_format_exc()
+        print(msg)

 def warm_up_modules():
     print('正在执行一些模块的预热...')
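For reference, the new raise_error flag only changes how failures are reported; a minimal usage sketch, not part of this commit:

    # Sketch: auto_update() keeps the old quiet behavior; raise_error=True appends
    # the trimmed traceback to the failure message instead of swallowing it.
    from check_proxy import auto_update

    auto_update()                   # default: raise_error=False
    auto_update(raise_error=True)   # failure messages include trimmed_format_exc() output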
config.py CHANGED
@@ -1,7 +1,7 @@
 import os

 # [step 1]>> 例如: API_KEY = "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" (此key无效)
-API_KEY = os.environ.get("GPT_KEY")    # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
+API_KEY = ""    # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"

 # [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改
 USE_PROXY = False
@@ -12,11 +12,11 @@ if USE_PROXY:
     # [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
     # [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上

-    # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
+    # 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
     proxies = {
         # [协议]:// [地址] :[端口]
-        "http": "socks5h://localhost:11284",
-        "https": "socks5h://localhost:11284",
+        "http": "socks5h://localhost:11284",   # 再例如 "http": "http://127.0.0.1:7890",
+        "https": "socks5h://localhost:11284",  # 再例如 "https": "http://127.0.0.1:7890",
     }
 else:
     proxies = None
@@ -28,13 +28,14 @@ DEFAULT_WORKER_NUM = 3

 # [step 4]>> 以下配置可以优化体验,但大部分场合下并不需要修改
 # 对话窗的高度
-CHATBOT_HEIGHT = 440
+CHATBOT_HEIGHT = 600

 # 代码高亮
 CODE_HIGHLIGHT = True

 # 窗口布局
 LAYOUT = "LEFT-RIGHT"   # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
+DARK_MODE = True        # 暗色模式 / 亮色模式

 # 发送请求到OpenAI后,等待多久判定为超时
 TIMEOUT_SECONDS = 30
@@ -45,9 +46,10 @@ WEB_PORT = -1
 # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
 MAX_RETRY = 2

-# OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
-LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm"]
+# 模型选择
+LLM_MODEL = "newbing" # 可选 ↓↓↓
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+# P.S. 其他可用的模型还包括 ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

 # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
@@ -55,13 +57,26 @@ LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
 # 设置gradio的并行线程数(不需要修改)
 CONCURRENT_COUNT = 100

+# 加一个live2d装饰
+ADD_WAIFU = False
+
 # 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
 # [("username", "password"), ("username2", "password2"), ...]
 AUTHENTICATION = []

 # 重新URL重新定向,实现更换API_URL的作用(常规情况下,不要修改!!)
-# 格式 {"https://api.openai.com/v1/chat/completions": "重定向的URL"}
+# (高危设置!通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
+# 格式 {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
+# 例如 API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://ai.open.com/api/conversation"}
 API_URL_REDIRECT = {}

 # 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!)
 CUSTOM_PATH = "/"
+
+# 如果需要使用newbing,把newbing的长长的cookie放到这里
+NEWBING_STYLE = "creative"  # ["creative", "balanced", "precise"]
+NEWBING_COOKIES = os.environ.get("NEWBING_COOKIES")
+
+# 如果需要使用Slack Claude,使用教程详情见 request_llm/README.md
+SLACK_CLAUDE_BOT_ID = ''
+SLACK_CLAUDE_USER_TOKEN = ''
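For context, API_URL_REDIRECT maps the official endpoint to a replacement URL; a standalone sketch of the lookup pattern, with a placeholder redirect target (the real lookup lives in the request_llm layer, not shown here):

    # Hypothetical sketch: fall back to the original URL when no redirect is configured.
    API_URL_REDIRECT = {
        "https://api.openai.com/v1/chat/completions": "https://example-proxy.invalid/v1/chat/completions",
    }

    def resolve_endpoint(url: str) -> str:
        return API_URL_REDIRECT.get(url, url)

    print(resolve_endpoint("https://api.openai.com/v1/chat/completions"))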
core_functional.py CHANGED
@@ -68,4 +68,11 @@ def get_core_functions():
         "Prefix": r"请解释以下代码:" + "\n```\n",
         "Suffix": "\n```\n",
     },
+    "参考文献转Bib": {
+        "Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
+                  r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
+                  r"Items need to be transformed:",
+        "Suffix": r"",
+        "Visible": False,
+    }
 }
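The new entry follows the core-function shape used throughout this file; assuming, as the other entries suggest, that an entry's Prefix and Suffix are wrapped around the user's input before it is sent to the model, a minimal sketch (the bibliography item is made up):

    entry = {
        "Prefix": "Here are some bibliography items, please transform them into bibtex style. Items need to be transformed:",
        "Suffix": "",
    }
    user_input = "Smith, J. (2020). An Example Paper. Journal of Examples, 1(2), 3-4."
    final_prompt = entry["Prefix"] + user_input + entry["Suffix"]
    print(final_prompt)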
crazy_functional.py CHANGED
@@ -10,8 +10,9 @@ def get_crazy_functions():
     from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
     from crazy_functions.解析项目源代码 import 解析一个C项目
     from crazy_functions.解析项目源代码 import 解析一个Golang项目
+    from crazy_functions.解析项目源代码 import 解析一个Rust项目
     from crazy_functions.解析项目源代码 import 解析一个Java项目
-    from crazy_functions.解析项目源代码 import 解析一个Rect项目
+    from crazy_functions.解析项目源代码 import 解析一个前端项目
     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
     from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
     from crazy_functions.Latex全文润色 import Latex英文润色
@@ -21,20 +22,30 @@ def get_crazy_functions():
     from crazy_functions.总结word文档 import 总结word文档
     from crazy_functions.解析JupyterNotebook import 解析ipynb文件
     from crazy_functions.对话历史存档 import 对话历史存档
+    from crazy_functions.对话历史存档 import 载入对话历史存档
+    from crazy_functions.对话历史存档 import 删除所有本地对话历史记录
+
+    from crazy_functions.批量Markdown翻译 import Markdown英译中
     function_plugins = {
-
         "解析整个Python项目": {
             "Color": "stop",    # 按钮颜色
             "Function": HotReload(解析一个Python项目)
         },
-        "保存当前的对话": {
+        "载入对话历史存档(先上传存档或输入路径)": {
+            "Color": "stop",
             "AsButton":False,
-            "Function": HotReload(对话历史存档)
+            "Function": HotReload(载入对话历史存档)
+        },
+        "删除所有本地对话历史记录(请谨慎操作)": {
+            "AsButton":False,
+            "Function": HotReload(删除所有本地对话历史记录)
         },
         "[测试功能] 解析Jupyter Notebook文件": {
             "Color": "stop",
             "AsButton":False,
             "Function": HotReload(解析ipynb文件),
+            "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
+            "ArgsReminder": "若输入0,则不解析notebook中的Markdown块", # 高级参数输入区的显示提示
         },
         "批量总结Word文档": {
             "Color": "stop",
@@ -55,15 +66,20 @@ def get_crazy_functions():
             "AsButton": False, # 加入下拉菜单中
             "Function": HotReload(解析一个Golang项目)
         },
+        "解析整个Rust项目": {
+            "Color": "stop",    # 按钮颜色
+            "AsButton": False,  # 加入下拉菜单中
+            "Function": HotReload(解析一个Rust项目)
+        },
         "解析整个Java项目": {
             "Color": "stop",    # 按钮颜色
             "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(解析一个Java项目)
         },
-        "解析整个React项目": {
+        "解析整个前端项目(js,ts,css等)": {
             "Color": "stop",    # 按钮颜色
             "AsButton": False,  # 加入下拉菜单中
-            "Function": HotReload(解析一个Rect项目)
+            "Function": HotReload(解析一个前端项目)
         },
         "解析整个Lua项目": {
             "Color": "stop",    # 按钮颜色
@@ -79,19 +95,29 @@ def get_crazy_functions():
             "Color": "stop",    # 按钮颜色
             "Function": HotReload(读文章写摘要)
         },
+        "Markdown/Readme英译中": {
+            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
+            "Color": "stop",
+            "Function": HotReload(Markdown英译中)
+        },
         "批量生成函数注释": {
             "Color": "stop",    # 按钮颜色
+            "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(批量生成函数注释)
         },
+        "保存当前的对话": {
+            "Function": HotReload(对话历史存档)
+        },
         "[多线程Demo] 解析此项目本身(源码自译解)": {
+            "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(解析项目本身)
         },
-        "[多线程demo] 把本项目源代码切换成全英文": {
+        "[老旧的Demo] 把本项目源代码切换成全英文": {
             # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
             "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(全项目切换英文)
         },
-        "[函数插件模板Demo] 历史上的今天": {
+        "[插件demo] 历史上的今天": {
             # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
             "Function": HotReload(高阶功能模板函数)
         },
@@ -108,7 +134,6 @@ def get_crazy_functions():
     from crazy_functions.Latex全文翻译 import Latex中译英
     from crazy_functions.Latex全文翻译 import Latex英译中
     from crazy_functions.批量Markdown翻译 import Markdown中译英
-    from crazy_functions.批量Markdown翻译 import Markdown英译中

     function_plugins.update({
         "批量翻译PDF文档(多线程)": {
@@ -155,30 +180,25 @@ def get_crazy_functions():
             "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(Latex中文润色)
         },
-        "[测试功能] Latex项目全文中译英(输入路径或上传压缩包)": {
+        "Latex项目全文中译英(输入路径或上传压缩包)": {
             # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(Latex中译英)
         },
-        "[测试功能] Latex项目全文英译中(输入路径或上传压缩包)": {
+        "Latex项目全文英译中(输入路径或上传压缩包)": {
             # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(Latex英译中)
         },
-        "[测试功能] 批量Markdown中译英(输入路径或上传压缩包)": {
+        "批量Markdown中译英(输入路径或上传压缩包)": {
             # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
             "Function": HotReload(Markdown中译英)
         },
-        "[测试功能] 批量Markdown英译中(输入路径或上传压缩包)": {
-            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
-            "Color": "stop",
-            "AsButton": False,  # 加入下拉菜单中
-            "Function": HotReload(Markdown英译中)
-        },
+
     })
@@ -222,5 +242,25 @@ def get_crazy_functions():
             "Function": HotReload(同时问询_指定模型)
         },
     })
+    from crazy_functions.图片生成 import 图片生成
+    function_plugins.update({
+        "图片生成(先切换模型到openai或api2d)": {
+            "Color": "stop",
+            "AsButton": False,
+            "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
+            "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示
+            "Function": HotReload(图片生成)
+        },
+    })
+    from crazy_functions.总结音视频 import 总结音视频
+    function_plugins.update({
+        "批量总结音视频(输入路径或上传压缩包)": {
+            "Color": "stop",
+            "AsButton": False,
+            "AdvancedArgs": True,
+            "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
+            "Function": HotReload(总结音视频)
+        }
+    })
     ###################### 第n组插件 ###########################
     return function_plugins
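The dictionaries above all share one registration shape; a minimal sketch of that shape with a stand-in plugin function (my_plugin and the label are hypothetical, and the real file wraps the function in HotReload(...)):

    def my_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        pass  # hypothetical plugin body

    function_plugins = {
        "示例插件": {
            "Color": "stop",             # button color
            "AsButton": False,           # dropdown menu only, no dedicated button
            "AdvancedArgs": True,        # open the advanced-argument input box when invoked
            "ArgsReminder": "提示文本",   # hint shown in that input box
            "Function": my_plugin,
        },
    }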
crazy_functions/crazy_functions_test.py CHANGED
@@ -81,29 +81,13 @@ def test_下载arxiv论文并翻译摘要():

 def test_联网回答问题():
     from crazy_functions.联网的ChatGPT import 连接网络回答问题
-    # txt = "“我们称之为高效”是什么梗?"
-    # >> 从第0份、第1份、第2份搜索结果可以看出,“我们称之为高效”是指在游戏社区中,用户们用来形容一些游戏策略或行为非常高效且能够带来好的效果的用语。这个用语最初可能是在群星(Stellaris)这个游戏里面流行起来的,后来也传播到了其他游戏中,比如巨像(Titan)等游戏。其中第1份搜索结果中的一篇文章也指出,“我们称之为高效”这一用语来源于群星(Stellaris)游戏中的一个情节。
-    # txt = "为什么说枪毙P社玩家没有一个冤枉的?"
-    # >> 它们都是关于一个知乎用户所发的帖子,引用了一群游戏玩家对于需要对P社玩家进行枪毙的讨论,这个话题的本质是玩家们对于P社游戏中的政治与历史元素的不同看法,以及其中不少玩家以极端立场宣扬的想法和言论,因此有人就以枪毙这些玩家来回应此类言论。但是这个话题本身并没有实质内容,只是一个玩笑或者恶搞,并不应该被当做真实的态度或者观点,因此这种说法没有实际意义。
     # txt = "谁是应急食品?"
     # >> '根据以上搜索结果可以得知,应急食品是“原神”游戏中的角色派蒙的外号。'
     # txt = "道路千万条,安全第一条。后面两句是?"
     # >> '行车不规范,亲人两行泪。'
-    # txt = "What is in the canister?"
-    # >> Rainbow Six Siege 游戏中 Smoke 的 Canister 中装有何种物质相关的官方信息。
-    # txt = "失败的man是什么?"
-    # >> 根据第1份搜索结果,可以得知失败的man是指一位在B站购买了蜘蛛侠COS服后穿上后被网友嘲笑的UP主,而“失败的man”是蜘蛛侠英文名“spiderman”的谐音梗,并且网友们还给这位UP主起了“苍蝇侠”的外号。因此,失败的man是指这位UP主在穿上蜘蛛侠COS服后被网友嘲笑的情况。
-    # txt = "老六是什么,起源于哪里?"
-    # >> 老六是网络流行语,最初起源于游戏《CSGO》,指游戏中玩家中独来独往、游离于队伍之外的“自由人”或玩得比较菜或者玩得比较阴险的人,后来逐渐演变成指玩得比较阴险的玩家。
-    # txt = "罗小黑战记因为什么经常被吐槽?"
-    # >> 3. 更新速度。罗小黑战记的更新时间不定,时而快时而慢,给观众留下了等待的时间过长的印象。
-    # txt = "沙特、伊朗最近的关系如何?"
-    # >> 最近在中国的斡旋下,沙特和伊朗于3月10日达成了恢复两国外交关系的协议,这表明两国关系已经重新回到正常化状态。
     # txt = "You should have gone for the head. What does that mean?"
     # >> The phrase "You should have gone for the head" is a quote from the Marvel movies, Avengers: Infinity War and Avengers: Endgame. It was spoken by the character Thanos in Infinity War and by Thor in Endgame.
     txt = "AutoGPT是什么?"
-    # >> AutoGPT是一个基于GPT-4语言模型的开源应用程序。它可以根据用户需求自主执行任务,包括事件分析、营销方案撰写、代码编程、数学运算等等,并完全不需要用户插手。它可以自己思考,给出实现的步骤和实现细节,甚至可以自问自答执行任务。最近它在GitHub上爆火,成为了业内最热门的项目之一。
-    # txt = "钟离带什么圣遗物?"
     for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
         print("当前问答:", cb[-1][-1].replace("\n"," "))
         for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1])
crazy_functions/crazy_utils.py CHANGED
@@ -1,5 +1,4 @@
-import traceback
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, trimmed_format_exc

 def input_clipping(inputs, history, max_token_limit):
     import numpy as np
@@ -94,12 +93,12 @@ def request_gpt_model_in_new_thread_with_ui_alive(
                     continue # 返回重试
                 else:
                     # 【选择放弃】
-                    tb_str = '```\n' + traceback.format_exc() + '```'
+                    tb_str = '```\n' + trimmed_format_exc() + '```'
                     mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                     return mutable[0] # 放弃
             except:
                 # 【第三种情况】:其他错误:重试几次
-                tb_str = '```\n' + traceback.format_exc() + '```'
+                tb_str = '```\n' + trimmed_format_exc() + '```'
                 print(tb_str)
                 mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                 if retry_op > 0:
@@ -173,7 +172,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     if max_workers == -1: # 读取配置文件
         try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
         except: max_workers = 8
-        if max_workers <= 0 or max_workers >= 20: max_workers = 8
+        if max_workers <= 0: max_workers = 3
     # 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
     if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
         max_workers = 1
@@ -220,14 +219,14 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                         continue # 返回重试
                     else:
                         # 【选择放弃】
-                        tb_str = '```\n' + traceback.format_exc() + '```'
+                        tb_str = '```\n' + trimmed_format_exc() + '```'
                         gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                         if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                         mutable[index][2] = "输入过长已放弃"
                         return gpt_say # 放弃
                 except:
                     # 【第三种情况】:其他错误
-                    tb_str = '```\n' + traceback.format_exc() + '```'
+                    tb_str = '```\n' + trimmed_format_exc() + '```'
                     print(tb_str)
                     gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                     if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
@@ -564,3 +563,46 @@ def read_and_clean_pdf_text(fp):
     # print亮绿('***************************')

     return meta_txt, page_one_meta
+
+
+def get_files_from_everything(txt, type): # type='.md'
+    """
+    获取指定路径下所有指定类型(如.md)的文件;对于网络上的文件,也可以获取。
+    参数:
+    - txt: 路径或网址,表示要搜索的文件、文件夹路径,或网络上的文件。
+    - type: 字符串,表示要搜索的文件类型,例如.md。
+    返回值:
+    - success: 布尔值,表示函数是否成功执行。
+    - file_manifest: 文件路径列表,包含以指定类型为后缀名的所有文件的路径。
+    - project_folder: 字符串,表示文件所在的文件夹路径;如果是网络上的文件,则是临时文件夹的路径。
+    """
+    import glob, os
+
+    success = True
+    if txt.startswith('http'):
+        # 网络的远程文件
+        import requests
+        from toolbox import get_conf
+        proxies, = get_conf('proxies')
+        r = requests.get(txt, proxies=proxies)
+        with open('./gpt_log/temp'+type, 'wb+') as f: f.write(r.content)
+        project_folder = './gpt_log/'
+        file_manifest = ['./gpt_log/temp'+type]
+    elif txt.endswith(type):
+        # 直接给定文件
+        file_manifest = [txt]
+        project_folder = os.path.dirname(txt)
+    elif os.path.exists(txt):
+        # 本地路径,递归搜索
+        project_folder = txt
+        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)]
+        if len(file_manifest) == 0:
+            success = False
+    else:
+        project_folder = None
+        file_manifest = []
+        success = False
+
+    return success, file_manifest, project_folder
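A usage sketch for the new helper (the ./docs path is hypothetical):

    from crazy_functions.crazy_utils import get_files_from_everything

    success, file_manifest, project_folder = get_files_from_everything('./docs', type='.md')
    if success:
        for path in file_manifest:
            print(path)  # every .md file found under ./docs, recursively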
crazy_functions/图片生成.py ADDED
@@ -0,0 +1,67 @@
+from toolbox import CatchException, update_ui, get_conf, select_api_key
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+import datetime
+
+
+def gen_image(llm_kwargs, prompt, resolution="256x256"):
+    import requests, json, time, os
+    from request_llm.bridge_all import model_info
+
+    proxies, = get_conf('proxies')
+    # Set up OpenAI API key and model
+    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
+    chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+    # 'https://api.openai.com/v1/chat/completions'
+    img_endpoint = chat_endpoint.replace('chat/completions','images/generations')
+    # # Generate the image
+    url = img_endpoint
+    headers = {
+        'Authorization': f"Bearer {api_key}",
+        'Content-Type': 'application/json'
+    }
+    data = {
+        'prompt': prompt,
+        'n': 1,
+        'size': resolution,
+        'response_format': 'url'
+    }
+    response = requests.post(url, headers=headers, json=data, proxies=proxies)
+    print(response.content)
+    image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
+
+    # 文件保存到本地
+    r = requests.get(image_url, proxies=proxies)
+    file_path = 'gpt_log/image_gen/'
+    os.makedirs(file_path, exist_ok=True)
+    file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
+    with open(file_path+file_name, 'wb+') as f: f.write(r.content)
+
+    return image_url, file_path+file_name
+
+
+@CatchException
+def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
+    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
+    plugin_kwargs   插件模型的参数,暂时没有用武之地
+    chatbot         聊天显示框的句柄,用于显示给用户
+    history         聊天历史,前情提要
+    system_prompt   给gpt的静默提醒
+    web_port        当前软件运行的端口号
+    """
+    history = []    # 清空历史,以免输入溢出
+    chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-xxxx或者api2d-xxxx。如果中文效果不理想, 尝试Prompt。正在处理中 ....."))
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    resolution = plugin_kwargs.get("advanced_arg", '256x256')
+    image_url, image_path = gen_image(llm_kwargs, prompt, resolution)
+    chatbot.append([prompt,
+        f'图像中转网址: <br/>`{image_url}`<br/>'+
+        f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
+        f'本地文件地址: <br/>`{image_path}`<br/>'+
+        f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
+    ])
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
@@ -1,7 +1,8 @@
1
  from toolbox import CatchException, update_ui
2
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 
3
 
4
- def write_chat_to_file(chatbot, file_name=None):
5
  """
6
  将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
7
  """
@@ -11,20 +12,62 @@ def write_chat_to_file(chatbot, file_name=None):
11
  file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
12
  os.makedirs('./gpt_log/', exist_ok=True)
13
  with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
 
 
14
  for i, contents in enumerate(chatbot):
15
- for content in contents:
16
  try: # 这个bug没找到触发条件,暂时先这样顶一下
17
  if type(content) != str: content = str(content)
18
  except:
19
  continue
20
  f.write(content)
21
- f.write('\n\n')
 
22
  f.write('<hr color="red"> \n\n')
23
-
 
 
 
 
24
  res = '对话历史写入:' + os.path.abspath(f'./gpt_log/{file_name}')
25
  print(res)
26
  return res
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  @CatchException
29
  def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
30
  """
@@ -37,6 +80,64 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
37
  web_port 当前软件运行的端口号
38
  """
39
 
40
- chatbot.append(("保存当前对话", f"[Local Message] {write_chat_to_file(chatbot)}"))
 
41
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from toolbox import CatchException, update_ui
2
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
3
+ import re
4
 
5
+ def write_chat_to_file(chatbot, history=None, file_name=None):
6
  """
7
  将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
8
  """
 
12
  file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
13
  os.makedirs('./gpt_log/', exist_ok=True)
14
  with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
15
+ from theme import advanced_css
16
+ f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
17
  for i, contents in enumerate(chatbot):
18
+ for j, content in enumerate(contents):
19
  try: # 这个bug没找到触发条件,暂时先这样顶一下
20
  if type(content) != str: content = str(content)
21
  except:
22
  continue
23
  f.write(content)
24
+ if j == 0:
25
+ f.write('<hr style="border-top: dotted 3px #ccc;">')
26
  f.write('<hr color="red"> \n\n')
27
+ f.write('<hr color="blue"> \n\n raw chat context:\n')
28
+ f.write('<code>')
29
+ for h in history:
30
+ f.write("\n>>>" + h)
31
+ f.write('</code>')
32
  res = '对话历史写入:' + os.path.abspath(f'./gpt_log/{file_name}')
33
  print(res)
34
  return res
35
 
36
+ def gen_file_preview(file_name):
37
+ try:
38
+ with open(file_name, 'r', encoding='utf8') as f:
39
+ file_content = f.read()
40
+ # pattern to match the text between <head> and </head>
41
+ pattern = re.compile(r'<head>.*?</head>', flags=re.DOTALL)
42
+ file_content = re.sub(pattern, '', file_content)
43
+ html, history = file_content.split('<hr color="blue"> \n\n raw chat context:\n')
44
+ history = history.strip('<code>')
45
+ history = history.strip('</code>')
46
+ history = history.split("\n>>>")
47
+ return list(filter(lambda x:x!="", history))[0][:100]
48
+ except:
49
+ return ""
50
+
51
+ def read_file_to_chat(chatbot, history, file_name):
52
+ with open(file_name, 'r', encoding='utf8') as f:
53
+ file_content = f.read()
54
+ # pattern to match the text between <head> and </head>
55
+ pattern = re.compile(r'<head>.*?</head>', flags=re.DOTALL)
56
+ file_content = re.sub(pattern, '', file_content)
57
+ html, history = file_content.split('<hr color="blue"> \n\n raw chat context:\n')
58
+ history = history.strip('<code>')
59
+ history = history.strip('</code>')
60
+ history = history.split("\n>>>")
61
+ history = list(filter(lambda x:x!="", history))
62
+ html = html.split('<hr color="red"> \n\n')
63
+ html = list(filter(lambda x:x!="", html))
64
+ chatbot.clear()
65
+ for i, h in enumerate(html):
66
+ i_say, gpt_say = h.split('<hr style="border-top: dotted 3px #ccc;">')
67
+ chatbot.append([i_say, gpt_say])
68
+ chatbot.append([f"存档文件详情?", f"[Local Message] 载入对话{len(html)}条,上下文{len(history)}条。"])
69
+ return chatbot, history
70
+
71
  @CatchException
72
  def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
73
  """
 
80
  web_port 当前软件运行的端口号
81
  """
82
 
83
+ chatbot.append(("保存当前对话",
84
+ f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用“载入对话历史存档”还原当下的对话。\n警告!被保存的对话历史可以被使用该系统的任何人查阅。"))
85
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
86
 
87
+ def hide_cwd(str):
88
+ import os
89
+ current_path = os.getcwd()
90
+ replace_path = "."
91
+ return str.replace(current_path, replace_path)
92
+
93
+ @CatchException
94
+ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
95
+ """
96
+ txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
97
+ llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
98
+ plugin_kwargs 插件模型的参数,暂时没有用武之地
99
+ chatbot 聊天显示框的句柄,用于显示给用户
100
+ history 聊天历史,前情提要
101
+ system_prompt 给gpt的静默提醒
102
+ web_port 当前软件运行的端口号
103
+ """
104
+ from .crazy_utils import get_files_from_everything
105
+ success, file_manifest, _ = get_files_from_everything(txt, type='.html')
106
+
107
+ if not success:
108
+ if txt == "": txt = '空空如也的输入栏'
109
+ import glob
110
+ local_history = "<br/>".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True)])
111
+ chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:<br/>{local_history}"])
112
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
113
+ return
114
+
115
+ try:
116
+ chatbot, history = read_file_to_chat(chatbot, history, file_manifest[0])
117
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
118
+ except:
119
+ chatbot.append([f"载入对话历史文件", f"对话历史文件损坏!"])
120
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
121
+ return
122
+
123
+ @CatchException
124
+ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
125
+ """
126
+ txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
127
+ llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
128
+ plugin_kwargs 插件模型的参数,暂时没有用武之地
129
+ chatbot 聊天显示框的句柄,用于显示给用户
130
+ history 聊天历史,前情提要
131
+ system_prompt 给gpt的静默提醒
132
+ web_port 当前软件运行的端口号
133
+ """
134
+
135
+ import glob, os
136
+ local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True)])
137
+ for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True):
138
+ os.remove(f)
139
+ chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
140
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
141
+ return
142
+
143
+
crazy_functions/总结word文档.py CHANGED
@@ -85,7 +85,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
85
  # 基本信息:功能、贡献者
86
  chatbot.append([
87
  "函数插件功能?",
88
- "批量总结Word文档。函数插件贡献者: JasonGuo1"])
89
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
90
 
91
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
 
85
  # 基本信息:功能、贡献者
86
  chatbot.append([
87
  "函数插件功能?",
88
+ "批量总结Word文档。函数插件贡献者: JasonGuo1。注意, 如果是.doc文件, 请先转化为.docx格式。"])
89
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
90
 
91
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
crazy_functions/总结音视频.py ADDED
@@ -0,0 +1,184 @@
+from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file, get_conf
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+
+def split_audio_file(filename, split_duration=1000):
+    """
+    根据给定的切割时长将音频文件切割成多个片段。
+
+    Args:
+        filename (str): 需要被切割的音频文件名。
+        split_duration (int, optional): 每个切割音频片段的时长(以秒为单位)。默认值为1000。
+
+    Returns:
+        filelist (list): 一个包含所有切割音频片段文件路径的列表。
+    """
+    from moviepy.editor import AudioFileClip
+    import os
+    os.makedirs('gpt_log/mp3/cut/', exist_ok=True)  # 创建存储切割音频的文件夹
+
+    # 读取音频文件
+    audio = AudioFileClip(filename)
+
+    # 计算文件总时长和切割点
+    total_duration = audio.duration
+    split_points = list(range(0, int(total_duration), split_duration))
+    split_points.append(int(total_duration))
+    filelist = []
+
+    # 切割音频文件
+    for i in range(len(split_points) - 1):
+        start_time = split_points[i]
+        end_time = split_points[i + 1]
+        split_audio = audio.subclip(start_time, end_time)
+        split_audio.write_audiofile(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3")
+        filelist.append(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3")
+
+    audio.close()
+    return filelist
+
+def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
+    import os, requests
+    from moviepy.editor import AudioFileClip
+    from request_llm.bridge_all import model_info
+
+    # 设置OpenAI密钥和模型
+    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
+    chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
+
+    whisper_endpoint = chat_endpoint.replace('chat/completions', 'audio/transcriptions')
+    url = whisper_endpoint
+    headers = {
+        'Authorization': f"Bearer {api_key}"
+    }
+
+    os.makedirs('gpt_log/mp3/', exist_ok=True)
+    for index, fp in enumerate(file_manifest):
+        audio_history = []
+        # 提取文件扩展名
+        ext = os.path.splitext(fp)[1]
+        # 提取视频中的音频
+        if ext not in [".mp3", ".wav", ".m4a", ".mpga"]:
+            audio_clip = AudioFileClip(fp)
+            audio_clip.write_audiofile(f'gpt_log/mp3/output{index}.mp3')
+            fp = f'gpt_log/mp3/output{index}.mp3'
+        # 调用whisper模型音频转文字
+        voice = split_audio_file(fp)
+        for j, i in enumerate(voice):
+            with open(i, 'rb') as f:
+                file_content = f.read()  # 读取文件内容到内存
+            files = {
+                'file': (os.path.basename(i), file_content),
+            }
+            data = {
+                "model": "whisper-1",
+                "prompt": parse_prompt,
+                'response_format': "text"
+            }
+
+            chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."])
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+            proxies, = get_conf('proxies')
+            response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text
+
+            chatbot.append(["音频解析结果", response])
+            history.extend(["音频解析结果", response])
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+            i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```'
+            i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。'
+            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs=i_say,
+                inputs_show_user=i_say_show_user,
+                llm_kwargs=llm_kwargs,
+                chatbot=chatbot,
+                history=[],
+                sys_prompt=f"总结音频。音频文件名{fp}"
+            )
+
+            chatbot[-1] = (i_say_show_user, gpt_say)
+            history.extend([i_say_show_user, gpt_say])
+            audio_history.extend([i_say_show_user, gpt_say])
+
+        # 已经对该文章的所有片段总结完毕,如果文章被切分了
+        result = "".join(audio_history)
+        if len(audio_history) > 1:
+            i_say = f"根据以上的对话,使用中文总结音频“{result}”的主要内容。"
+            i_say_show_user = f'第{index + 1}段音频的主要内容:'
+            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs=i_say,
+                inputs_show_user=i_say_show_user,
+                llm_kwargs=llm_kwargs,
+                chatbot=chatbot,
+                history=audio_history,
+                sys_prompt="总结文章。"
+            )
+
+            history.extend([i_say, gpt_say])
+            audio_history.extend([i_say, gpt_say])
+
+        res = write_results_to_file(history)
+        chatbot.append((f"第{index + 1}段音频完成了吗?", res))
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+    # 删除中间文件夹
+    import shutil
+    shutil.rmtree('gpt_log/mp3')
+    res = write_results_to_file(history)
+    chatbot.append(("所有音频都总结完成了吗?", res))
+    yield from update_ui(chatbot=chatbot, history=history)
+
+
+@CatchException
+def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT):
+    import glob, os
+
+    # 基本信息:功能、贡献者
+    chatbot.append([
+        "函数插件功能?",
+        "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"])
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+    try:
+        from moviepy.editor import AudioFileClip
+    except:
+        report_execption(chatbot, history,
+                         a=f"解析项目: {txt}",
+                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。")
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        return
+
+    # 清空历史,以免输入溢出
+    history = []
+
+    # 检测输入参数,如没有给定输入参数,直接退出
+    if os.path.exists(txt):
+        project_folder = txt
+    else:
+        if txt == "": txt = '空空如也的输入栏'
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        return
+
+    # 搜索需要处理的文件清单
+    extensions = ['.mp4', '.m4a', '.wav', '.mpga', '.mpeg', '.mp3', '.avi', '.mkv', '.flac', '.aac']
+
+    if txt.endswith(tuple(extensions)):
+        file_manifest = [txt]
+    else:
+        file_manifest = []
+        for extension in extensions:
+            file_manifest.extend(glob.glob(f'{project_folder}/**/*{extension}', recursive=True))
+
+    # 如果没找到任何文件
+    if len(file_manifest) == 0:
+        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}")
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+        return
+
+    # 开始正式执行任务
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    parse_prompt = plugin_kwargs.get("advanced_arg", '将音频解析为简体中文')
+    yield from AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history)
+
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -84,7 +84,33 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
84
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
85
 
86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
 
88
 
89
 
90
  @CatchException
@@ -98,6 +124,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
98
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
99
  try:
100
  import tiktoken
 
101
  except:
102
  report_execption(chatbot, history,
103
  a=f"解析项目: {txt}",
@@ -105,19 +132,21 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
105
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
106
  return
107
  history = [] # 清空历史,以免输入溢出
108
- import glob, os
109
- if os.path.exists(txt):
110
- project_folder = txt
111
- else:
 
112
  if txt == "": txt = '空空如也的输入栏'
113
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
114
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
115
  return
116
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
117
  if len(file_manifest) == 0:
118
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
119
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
120
  return
 
121
  yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
122
 
123
 
@@ -135,6 +164,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
135
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
136
  try:
137
  import tiktoken
 
138
  except:
139
  report_execption(chatbot, history,
140
  a=f"解析项目: {txt}",
@@ -142,18 +172,13 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
142
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
143
  return
144
  history = [] # 清空历史,以免输入溢出
145
- import glob, os
146
- if os.path.exists(txt):
147
- project_folder = txt
148
- else:
149
  if txt == "": txt = '空空如也的输入栏'
150
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
151
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
152
  return
153
- if txt.endswith('.md'):
154
- file_manifest = [txt]
155
- else:
156
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
157
  if len(file_manifest) == 0:
158
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
159
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
84
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
85
 
86
 
87
+ def get_files_from_everything(txt):
88
+ import glob, os
89
+
90
+ success = True
91
+ if txt.startswith('http'):
92
+ # 网络的远程文件
93
+ txt = txt.replace("https://github.com/", "https://raw.githubusercontent.com/")
94
+ txt = txt.replace("/blob/", "/")
95
+ import requests
96
+ from toolbox import get_conf
97
+ proxies, = get_conf('proxies')
98
+ r = requests.get(txt, proxies=proxies)
99
+ with open('./gpt_log/temp.md', 'wb+') as f: f.write(r.content)
100
+ project_folder = './gpt_log/'
101
+ file_manifest = ['./gpt_log/temp.md']
102
+ elif txt.endswith('.md'):
103
+ # 直接给定文件
104
+ file_manifest = [txt]
105
+ project_folder = os.path.dirname(txt)
106
+ elif os.path.exists(txt):
107
+ # 本地路径,递归搜索
108
+ project_folder = txt
109
+ file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
110
+ else:
111
+ success = False
112
 
113
+ return success, file_manifest, project_folder
114
 
115
 
116
  @CatchException
 
124
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
125
  try:
126
  import tiktoken
127
+ import glob, os
128
  except:
129
  report_execption(chatbot, history,
130
  a=f"解析项目: {txt}",
 
132
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
133
  return
134
  history = [] # 清空历史,以免输入溢出
135
+
136
+ success, file_manifest, project_folder = get_files_from_everything(txt)
137
+
138
+ if not success:
139
+ # 什么都没有
140
  if txt == "": txt = '空空如也的输入栏'
141
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
142
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
143
  return
144
+
145
  if len(file_manifest) == 0:
146
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
147
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
148
  return
149
+
150
  yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
151
 
152
 
 
164
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
165
  try:
166
  import tiktoken
167
+ import glob, os
168
  except:
169
  report_execption(chatbot, history,
170
  a=f"解析项目: {txt}",
 
172
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
173
  return
174
  history = [] # 清空历史,以免输入溢出
175
+ success, file_manifest, project_folder = get_files_from_everything(txt)
176
+ if not success:
177
+ # 什么都没有
 
178
  if txt == "": txt = '空空如也的输入栏'
179
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
180
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
181
  return
 
 
 
 
182
  if len(file_manifest) == 0:
183
  report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
184
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
crazy_functions/批量总结PDF文档.py CHANGED
@@ -41,8 +41,8 @@ def clean_text(raw_text):
41
  """
42
  对从 PDF 提取出的原始文本进行清洗和格式化处理。
43
  1. 对原始文本进行归一化处理。
44
- 2. 替换跨行的连词,例如 “Espe-\ncially” 转换为 “Especially”。
45
- 3. 根据 heuristic 规则判断换行符是否是段落分隔,并相应地进行替换。
46
  """
47
  # 对文本进行归一化处理
48
  normalized_text = normalize_text(raw_text)
 
41
  """
42
  对从 PDF 提取出的原始文本进行清洗和格式化处理。
43
  1. 对原始文本进行归一化处理。
44
+ 2. 替换跨行的连词
45
+ 3. 根据 heuristic 规则判断换行符是否是段落分隔,并相应地进行替换
46
  """
47
  # 对文本进行归一化处理
48
  normalized_text = normalize_text(raw_text)
crazy_functions/批量翻译PDF文档_多线程.py CHANGED
@@ -58,14 +58,17 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_
58
 
59
  def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt):
60
  import os
 
61
  import tiktoken
62
  TOKEN_LIMIT_PER_FRAGMENT = 1280
63
  generated_conclusion_files = []
 
64
  for index, fp in enumerate(file_manifest):
65
 
66
  # 读取PDF文件
67
  file_content, page_one = read_and_clean_pdf_text(fp)
68
-
 
69
  # 递归地切割PDF文件
70
  from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
71
  from request_llm.bridge_all import model_info
@@ -74,7 +77,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
74
  paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
75
  txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
76
  page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
77
- txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
78
 
79
  # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
80
  paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
@@ -100,15 +103,15 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
100
  "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in paper_fragments],
101
  # max_workers=5 # OpenAI所允许的最大并行过载
102
  )
103
-
104
  # 整理报告的格式
105
- for i,k in enumerate(gpt_response_collection):
106
  if i%2==0:
107
- gpt_response_collection[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection)//2}]:\n "
108
  else:
109
- gpt_response_collection[i] = gpt_response_collection[i]
110
  final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
111
- final.extend(gpt_response_collection)
112
  create_report_file_name = f"{os.path.basename(fp)}.trans.md"
113
  res = write_results_to_file(final, file_name=create_report_file_name)
114
 
@@ -117,15 +120,97 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
117
  chatbot.append((f"{fp}完成了吗?", res))
118
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
119
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  # 准备文件的下载
121
  import shutil
122
  for pdf_path in generated_conclusion_files:
123
  # 重命名文件
124
- rename_file = f'./gpt_log/总结论文-{os.path.basename(pdf_path)}'
125
  if os.path.exists(rename_file):
126
  os.remove(rename_file)
127
  shutil.copyfile(pdf_path, rename_file)
128
  if os.path.exists(pdf_path):
129
  os.remove(pdf_path)
130
- chatbot.append(("给出输出文件清单", str(generated_conclusion_files)))
 
 
 
 
 
 
 
 
131
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
 
59
  def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt):
60
  import os
61
+ import copy
62
  import tiktoken
63
  TOKEN_LIMIT_PER_FRAGMENT = 1280
64
  generated_conclusion_files = []
65
+ generated_html_files = []
66
  for index, fp in enumerate(file_manifest):
67
 
68
  # 读取PDF文件
69
  file_content, page_one = read_and_clean_pdf_text(fp)
70
+ file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
71
+ page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
72
  # 递归地切割PDF文件
73
  from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
74
  from request_llm.bridge_all import model_info
 
77
  paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
78
  txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
79
  page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
80
+ txt=page_one, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
81
 
82
  # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
83
  paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
 
103
  "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in paper_fragments],
104
  # max_workers=5 # OpenAI所允许的最大并行过载
105
  )
106
+ gpt_response_collection_md = copy.deepcopy(gpt_response_collection)
107
  # 整理报告的格式
108
+ for i,k in enumerate(gpt_response_collection_md):
109
  if i%2==0:
110
+ gpt_response_collection_md[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection_md)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection_md)//2}]:\n "
111
  else:
112
+ gpt_response_collection_md[i] = gpt_response_collection_md[i]
113
  final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
114
+ final.extend(gpt_response_collection_md)
115
  create_report_file_name = f"{os.path.basename(fp)}.trans.md"
116
  res = write_results_to_file(final, file_name=create_report_file_name)
117
 
 
120
  chatbot.append((f"{fp}完成了吗?", res))
121
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
122
 
123
+ # write html
124
+ try:
125
+ ch = construct_html()
126
+ orig = ""
127
+ trans = ""
128
+ gpt_response_collection_html = copy.deepcopy(gpt_response_collection)
129
+ for i,k in enumerate(gpt_response_collection_html):
130
+ if i%2==0:
131
+ gpt_response_collection_html[i] = paper_fragments[i//2].replace('#', '')
132
+ else:
133
+ gpt_response_collection_html[i] = gpt_response_collection_html[i]
134
+ final = ["论文概况", paper_meta_info.replace('# ', '### '), "二、论文翻译", ""]
135
+ final.extend(gpt_response_collection_html)
136
+ for i, k in enumerate(final):
137
+ if i%2==0:
138
+ orig = k
139
+ if i%2==1:
140
+ trans = k
141
+ ch.add_row(a=orig, b=trans)
142
+ create_report_file_name = f"{os.path.basename(fp)}.trans.html"
143
+ ch.save_file(create_report_file_name)
144
+ generated_html_files.append(f'./gpt_log/{create_report_file_name}')
145
+ except:
146
+ from toolbox import trimmed_format_exc
147
+ print('writing html result failed:', trimmed_format_exc())
148
+
149
  # 准备文件的下载
150
  import shutil
151
  for pdf_path in generated_conclusion_files:
152
  # 重命名文件
153
+ rename_file = f'./gpt_log/翻译-{os.path.basename(pdf_path)}'
154
  if os.path.exists(rename_file):
155
  os.remove(rename_file)
156
  shutil.copyfile(pdf_path, rename_file)
157
  if os.path.exists(pdf_path):
158
  os.remove(pdf_path)
159
+ for html_path in generated_html_files:
160
+ # 重命名文件
161
+ rename_file = f'./gpt_log/翻译-{os.path.basename(html_path)}'
162
+ if os.path.exists(rename_file):
163
+ os.remove(rename_file)
164
+ shutil.copyfile(html_path, rename_file)
165
+ if os.path.exists(html_path):
166
+ os.remove(html_path)
167
+ chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
168
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
169
+
170
+
171
+ class construct_html():
172
+ def __init__(self) -> None:
173
+ self.css = """
174
+ .row {
175
+ display: flex;
176
+ flex-wrap: wrap;
177
+ }
178
+
179
+ .column {
180
+ flex: 1;
181
+ padding: 10px;
182
+ }
183
+
184
+ .table-header {
185
+ font-weight: bold;
186
+ border-bottom: 1px solid black;
187
+ }
188
+
189
+ .table-row {
190
+ border-bottom: 1px solid lightgray;
191
+ }
192
+
193
+ .table-cell {
194
+ padding: 5px;
195
+ }
196
+ """
197
+ self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
198
+
199
+
200
+ def add_row(self, a, b):
201
+ tmp = """
202
+ <div class="row table-row">
203
+ <div class="column table-cell">REPLACE_A</div>
204
+ <div class="column table-cell">REPLACE_B</div>
205
+ </div>
206
+ """
207
+ from toolbox import markdown_convertion
208
+ tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
209
+ tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
210
+ self.html_string += tmp
211
+
212
+
213
+ def save_file(self, file_name):
214
+ with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
215
+ f.write(self.html_string.encode('utf-8', 'ignore').decode())
216
+
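A minimal usage sketch of the construct_html helper above, to show the intended call sequence: one add_row per original/translation pair, then save_file. The row contents and file name here are hypothetical, and the sketch assumes ./gpt_log/ already exists (save_file writes there) and that toolbox.markdown_convertion is importable:

# Hypothetical usage sketch of construct_html (assumes ./gpt_log/ exists)
ch = construct_html()
ch.add_row(a="## Original fragment ...", b="## 译文片段……")  # one original/translation pair per row
ch.add_row(a="## Second fragment ...", b="## 第二段译文……")
ch.save_file("demo.trans.html")                               # writes ./gpt_log/demo.trans.html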
crazy_functions/解析JupyterNotebook.py CHANGED
@@ -67,11 +67,17 @@ def parseNotebook(filename, enable_markdown=1):
67
  def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
68
  from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
69
 
 
 
 
 
 
 
 
70
  pfg = PaperFileGroup()
71
 
72
- print(file_manifest)
73
  for fp in file_manifest:
74
- file_content = parseNotebook(fp, enable_markdown=1)
75
  pfg.file_paths.append(fp)
76
  pfg.file_contents.append(file_content)
77
 
 
67
  def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
68
  from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
69
 
70
+ if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
71
+ enable_markdown = plugin_kwargs.get("advanced_arg", "1")
72
+ try:
73
+ enable_markdown = int(enable_markdown)
74
+ except ValueError:
75
+ enable_markdown = 1
76
+
77
  pfg = PaperFileGroup()
78
 
 
79
  for fp in file_manifest:
80
+ file_content = parseNotebook(fp, enable_markdown=enable_markdown)
81
  pfg.file_paths.append(fp)
82
  pfg.file_contents.append(file_content)
83
 
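The hunk above coerces the plugin's free-form advanced_arg into an integer switch, defaulting to 1 when the field is empty or not a number. The same coercion pattern in isolation, with a hypothetical plugin_kwargs dict:

# Isolated sketch of the advanced_arg coercion above (plugin_kwargs is hypothetical)
plugin_kwargs = {"advanced_arg": ""}   # empty string means the user left the field blank
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""):
    plugin_kwargs.pop("advanced_arg")  # treat blank as "not provided"
try:
    enable_markdown = int(plugin_kwargs.get("advanced_arg", "1"))
except ValueError:
    enable_markdown = 1                # fall back to parsing markdown cells
print(enable_markdown)                 # -> 1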
crazy_functions/解析项目源代码.py CHANGED
@@ -1,5 +1,6 @@
1
  from toolbox import update_ui
2
  from toolbox import CatchException, report_execption, write_results_to_file
 
3
 
4
  def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
5
  import os, copy
@@ -61,13 +62,15 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
61
  previous_iteration_files.extend([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
62
  previous_iteration_files_string = ', '.join(previous_iteration_files)
63
  current_iteration_focus = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
64
- i_say = f'根据以上分析,对程序的整体功能和构架重新做出概括。然后用一张markdown表格整理每个文件的功能(包括{previous_iteration_files_string})。'
65
  inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。'
66
  this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection)
67
  this_iteration_history.append(last_iteration_result)
 
 
68
  result = yield from request_gpt_model_in_new_thread_with_ui_alive(
69
- inputs=i_say, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot,
70
- history=this_iteration_history, # 迭代之前的分析
71
  sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。")
72
  report_part_2.extend([i_say, result])
73
  last_iteration_result = result
@@ -180,7 +183,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
180
 
181
 
182
  @CatchException
183
- def 解析一个Rect项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
184
  history = [] # 清空历史,以免输入溢出
185
  import glob, os
186
  if os.path.exists(txt):
@@ -194,9 +197,15 @@ def 解析一个Rect项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
194
  [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
195
  [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
196
  [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \
 
 
 
 
 
 
197
  [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
198
  if len(file_manifest) == 0:
199
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}")
200
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
201
  return
202
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -223,6 +232,25 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
223
  return
224
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
225
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
  @CatchException
228
  def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
 
1
  from toolbox import update_ui
2
  from toolbox import CatchException, report_execption, write_results_to_file
3
+ from .crazy_utils import input_clipping
4
 
5
  def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
6
  import os, copy
 
62
  previous_iteration_files.extend([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
63
  previous_iteration_files_string = ', '.join(previous_iteration_files)
64
  current_iteration_focus = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
65
+ i_say = f'用一张Markdown表格简要描述以下文件的功能:{previous_iteration_files_string}。根据以上分析,用一句话概括程序的整体功能。'
66
  inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。'
67
  this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection)
68
  this_iteration_history.append(last_iteration_result)
69
+ # 裁剪input
70
+ inputs, this_iteration_history_feed = input_clipping(inputs=i_say, history=this_iteration_history, max_token_limit=2560)
71
  result = yield from request_gpt_model_in_new_thread_with_ui_alive(
72
+ inputs=inputs, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot,
73
+ history=this_iteration_history_feed, # 迭代之前的分析
74
  sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。")
75
  report_part_2.extend([i_say, result])
76
  last_iteration_result = result
 
183
 
184
 
185
  @CatchException
186
+ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
187
  history = [] # 清空历史,以免输入溢出
188
  import glob, os
189
  if os.path.exists(txt):
 
197
  [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
198
  [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
199
  [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \
200
+ [f for f in glob.glob(f'{project_folder}/**/*.vue', recursive=True)] + \
201
+ [f for f in glob.glob(f'{project_folder}/**/*.less', recursive=True)] + \
202
+ [f for f in glob.glob(f'{project_folder}/**/*.sass', recursive=True)] + \
203
+ [f for f in glob.glob(f'{project_folder}/**/*.wxml', recursive=True)] + \
204
+ [f for f in glob.glob(f'{project_folder}/**/*.wxss', recursive=True)] + \
205
+ [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \
206
  [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
207
  if len(file_manifest) == 0:
208
+ report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}")
209
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
210
  return
211
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
232
  return
233
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
234
 
235
+ @CatchException
236
+ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
237
+ history = [] # 清空历史,以免输入溢出
238
+ import glob, os
239
+ if os.path.exists(txt):
240
+ project_folder = txt
241
+ else:
242
+ if txt == "": txt = '空空如也的输入栏'
243
+ report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
244
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
245
+ return
246
+ file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \
247
+ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \
248
+ [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)]
249
+ if len(file_manifest) == 0:
250
+ report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rust文件: {txt}")
251
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
252
+ return
253
+ yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
254
 
255
  @CatchException
256
  def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
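input_clipping, imported at the top of this file, trims the prompt and history to a token budget before each summary round, which is what keeps the iterative project analysis from overflowing the context window. A sketch of the call pattern with placeholder strings, matching the signature used in the hunk above (run from the repo root):

# Sketch of the input_clipping call pattern (placeholder prompt/history)
from crazy_functions.crazy_utils import input_clipping
i_say = "用一张Markdown表格简要描述以下文件的功能:a.py, b.py。"
history = ["earlier analysis of a.py", "earlier analysis of b.py"]
inputs, history_feed = input_clipping(inputs=i_say, history=history, max_token_limit=2560)
# inputs/history_feed are clipped so the combined token count stays under the limit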
crazy_functions/询问多个大语言模型.py CHANGED
@@ -45,6 +45,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
45
  chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
46
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
47
 
 
48
  # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
49
  llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo') # 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
50
  gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
 
45
  chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
46
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
47
 
48
+ if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
49
  # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
50
  llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo') # 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
51
  gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
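As the comment in the hunk notes, llm_model accepts any number of backends joined by '&'. A small illustrative helper (not part of the repository) showing how such a string splits back into individual model names:

# Illustrative helper, not part of the repository
def split_models(llm_model: str) -> list:
    # "chatglm&gpt-3.5-turbo" -> ["chatglm", "gpt-3.5-turbo"]
    return [m.strip() for m in llm_model.split('&') if m.strip()]

assert split_models("chatglm&gpt-3.5-turbo") == ["chatglm", "gpt-3.5-turbo"]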
crazy_functions/谷歌检索小助手.py CHANGED
@@ -36,14 +36,18 @@ def get_meta_information(url, chatbot, history):
36
  max_results = 1,
37
  sort_by = arxiv.SortCriterion.Relevance,
38
  )
39
- paper = next(search.results())
40
- if string_similar(title, paper.title) > 0.90: # same paper
41
- abstract = paper.summary.replace('\n', ' ')
42
- is_paper_in_arxiv = True
43
- else: # different paper
 
 
 
 
 
44
  abstract = abstract
45
  is_paper_in_arxiv = False
46
- paper = next(search.results())
47
  print(title)
48
  print(author)
49
  print(citation)
@@ -70,6 +74,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
70
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
71
  try:
72
  import arxiv
 
73
  from bs4 import BeautifulSoup
74
  except:
75
  report_execption(chatbot, history,
@@ -80,25 +85,26 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
80
 
81
  # 清空历史,以免输入溢出
82
  history = []
83
-
84
  meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
 
 
 
 
 
 
85
 
86
- if len(meta_paper_info_list[:10]) > 0:
87
- i_say = "下面是一些学术文献的数据,请从中提取出以下内容。" + \
88
- "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \
89
- f"以下是信息源:{str(meta_paper_info_list[:10])}"
90
-
91
- inputs_show_user = f"请分析此页面中出现的所有文章:{txt}"
92
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
93
- inputs=i_say, inputs_show_user=inputs_show_user,
94
- llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
95
- sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown格式。你必须逐个文献进行处理。"
96
- )
97
 
98
- history.extend([ "第一批", gpt_say ])
99
- meta_paper_info_list = meta_paper_info_list[10:]
100
 
101
- chatbot.append(["状态?", "已经全部完成"])
 
102
  msg = '正常'
103
  yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
104
  res = write_results_to_file(history)
 
36
  max_results = 1,
37
  sort_by = arxiv.SortCriterion.Relevance,
38
  )
39
+ try:
40
+ paper = next(search.results())
41
+ if string_similar(title, paper.title) > 0.90: # same paper
42
+ abstract = paper.summary.replace('\n', ' ')
43
+ is_paper_in_arxiv = True
44
+ else: # different paper
45
+ abstract = abstract
46
+ is_paper_in_arxiv = False
47
+ paper = next(search.results())
48
+ except:
49
  abstract = abstract
50
  is_paper_in_arxiv = False
 
51
  print(title)
52
  print(author)
53
  print(citation)
 
74
  # 尝试导入依赖,如果缺少依赖,则给出安装建议
75
  try:
76
  import arxiv
77
+ import math
78
  from bs4 import BeautifulSoup
79
  except:
80
  report_execption(chatbot, history,
 
85
 
86
  # 清空历史,以免输入溢出
87
  history = []
 
88
  meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
89
+ batchsize = 5
90
+ for batch in range(math.ceil(len(meta_paper_info_list)/batchsize)):
91
+ if len(meta_paper_info_list[:batchsize]) > 0:
92
+ i_say = "下面是一些学术文献的数据,提取出以下内容:" + \
93
+ "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \
94
+ f"以下是信息源:{str(meta_paper_info_list[:batchsize])}"
95
 
96
+ inputs_show_user = f"请分析此页面中出现的所有文章:{txt},这是第{batch+1}批"
97
+ gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
98
+ inputs=i_say, inputs_show_user=inputs_show_user,
99
+ llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
100
+ sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown表格。你必须逐个文献进行处理。"
101
+ )
 
 
 
 
 
102
 
103
+ history.extend([ f"第{batch+1}批", gpt_say ])
104
+ meta_paper_info_list = meta_paper_info_list[batchsize:]
105
 
106
+ chatbot.append(["状态?",
107
+ "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write a \"Related Works\" section about \"你搜索的研究领域\" for me."])
108
  msg = '正常'
109
  yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
110
  res = write_results_to_file(history)
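The rewritten loop above feeds meta_paper_info_list to the model five papers at a time, shrinking the list after each request so partial progress survives if a later batch fails. The slicing pattern in isolation, with illustrative data:

# Isolated sketch of the batch-consume pattern above (data is illustrative)
import math
meta_paper_info_list = [f"paper-{i}" for i in range(12)]
batchsize = 5
for batch in range(math.ceil(len(meta_paper_info_list) / batchsize)):
    current_batch = meta_paper_info_list[:batchsize]   # at most 5 papers per request
    meta_paper_info_list = meta_paper_info_list[batchsize:]
    print(f"batch {batch+1}: {current_batch}")         # batches of 5, 5, then 2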
docker-compose.yml ADDED
@@ -0,0 +1,104 @@
1
+ #【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后用 docker-compose up 运行 | Please choose one of the options below, delete the other options as well as this line, then run: docker-compose up
2
+
3
+ ## ===================================================
4
+ ## 【方案一】 如果不需要运行本地模型(仅chatgpt,newbing类远程服务)
5
+ ## ===================================================
6
+ version: '3'
7
+ services:
8
+ gpt_academic_nolocalllms:
9
+ image: ghcr.io/binary-husky/gpt_academic_nolocal:master
10
+ environment:
11
+ # 请查阅 `config.py` 以查看所有的配置信息
12
+ API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
13
+ USE_PROXY: ' True '
14
+ proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
15
+ LLM_MODEL: ' gpt-3.5-turbo '
16
+ AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "newbing"] '
17
+ WEB_PORT: ' 22303 '
18
+ ADD_WAIFU: ' True '
19
+ # DEFAULT_WORKER_NUM: ' 10 '
20
+ # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
21
+
22
+ # 与宿主的网络融合
23
+ network_mode: "host"
24
+
25
+ # 不使用代理网络拉取最新代码
26
+ command: >
27
+ bash -c "python3 -u main.py"
28
+
29
+
30
+ ### ===================================================
31
+ ### 【方案二】 如果需要运行ChatGLM本地模型
32
+ ### ===================================================
33
+ version: '3'
34
+ services:
35
+ gpt_academic_with_chatglm:
36
+ image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master
37
+ environment:
38
+ # 请查阅 `config.py` 以查看所有的配置信息
39
+ API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
40
+ USE_PROXY: ' True '
41
+ proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
42
+ LLM_MODEL: ' gpt-3.5-turbo '
43
+ AVAIL_LLM_MODELS: ' ["chatglm", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"] '
44
+ LOCAL_MODEL_DEVICE: ' cuda '
45
+ DEFAULT_WORKER_NUM: ' 10 '
46
+ WEB_PORT: ' 12303 '
47
+ ADD_WAIFU: ' True '
48
+ # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
49
+
50
+ # 显卡的使用,nvidia0指第0个GPU
51
+ runtime: nvidia
52
+ devices:
53
+ - /dev/nvidia0:/dev/nvidia0
54
+
55
+ # 与宿主的网络融合
56
+ network_mode: "host"
57
+ command: >
58
+ bash -c "python3 -u main.py"
59
+
60
+ ### ===================================================
61
+ ### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型
62
+ ### ===================================================
63
+ version: '3'
64
+ services:
65
+ gpt_academic_with_rwkv:
66
+ image: fuqingxu/gpt_academic:jittorllms # [方案三] 如果需要运行LLAMA、盘古、RWKV等jittorllms本地模型
67
+ environment:
68
+ # 请查阅 `config.py` 以查看所有的配置信息
69
+ API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
70
+ USE_PROXY: ' True '
71
+ proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } '
72
+ LLM_MODEL: ' gpt-3.5-turbo '
73
+ AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] '
74
+ LOCAL_MODEL_DEVICE: ' cuda '
75
+ DEFAULT_WORKER_NUM: ' 10 '
76
+ WEB_PORT: ' 12305 '
77
+ ADD_WAIFU: ' True '
78
+ # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
79
+
80
+ # 显卡的使用,nvidia0指第0个GPU
81
+ runtime: nvidia
82
+ devices:
83
+ - /dev/nvidia0:/dev/nvidia0
84
+
85
+ # 与宿主的网络融合
86
+ network_mode: "host"
87
+
88
+ # 使用代理网络拉取最新代码
89
+ # command: >
90
+ # bash -c " truncate -s -1 /etc/proxychains.conf &&
91
+ # echo \"socks5 127.0.0.1 10880\" >> /etc/proxychains.conf &&
92
+ # echo '[gpt-academic] 正在从github拉取最新代码...' &&
93
+ # proxychains git pull &&
94
+ # echo '[jittorllms] 正在从github拉取最新代码...' &&
95
+ # proxychains git --git-dir=request_llm/jittorllms/.git --work-tree=request_llm/jittorllms pull --force &&
96
+ # python3 -u main.py"
97
+
98
+ # 不使用代理网络拉取最新代码
99
+ command: >
100
+ bash -c " echo '[gpt-academic] 正在从github拉取最新代码...' &&
101
+ git pull &&
102
+ echo '[jittorllms] 正在从github拉取最新代码...' &&
103
+ git --git-dir=request_llm/jittorllms/.git --work-tree=request_llm/jittorllms pull --force &&
104
+ python3 -u main.py"
docs/Dockerfile+ChatGLM CHANGED
@@ -1,6 +1,6 @@
1
  # How to build | 如何构建: docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
2
- # How to run | 如何运行 (1) 直接运行(选择0号GPU): docker run --rm -it --net=host --gpus="0" gpt-academic
3
- # How to run | 如何运行 (2) 我想运行之前进容器做一些调整: docker run --rm -it --net=host --gpus="0" gpt-academic bash
4
 
5
  # 从NVIDIA源,从而支持显卡运行(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
6
  FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
@@ -14,6 +14,7 @@ RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
14
  RUN $useProxyNetwork curl cip.cc
15
  RUN sed -i '$ d' /etc/proxychains.conf
16
  RUN sed -i '$ d' /etc/proxychains.conf
 
17
  RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf
18
  ARG useProxyNetwork=proxychains
19
  # # comment out above if you do not need proxy network | 如果不需要翻墙 - 从此行向上删除
@@ -21,14 +22,15 @@ ARG useProxyNetwork=proxychains
21
 
22
  # use python3 as the system default python
23
  RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
24
-
 
25
  # 下载分支
26
  WORKDIR /gpt
27
  RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git
28
  WORKDIR /gpt/chatgpt_academic
29
  RUN $useProxyNetwork python3 -m pip install -r requirements.txt
30
  RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
31
- RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
32
 
33
  # 预热CHATGLM参数(非必要 可选步骤)
34
  RUN echo ' \n\
@@ -48,6 +50,7 @@ RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
48
  # 可同时填写多个API-KEY,支持openai的key和api2d的key共存,用英文逗号分割,例如API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........"
49
  # LLM_MODEL 是选择初始的模型
50
  # LOCAL_MODEL_DEVICE 是选择chatglm等本地模型运行的设备,可选 cpu 和 cuda
 
51
  RUN echo ' \n\
52
  API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
53
  USE_PROXY = True \n\
 
1
  # How to build | 如何构建: docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
2
+ # How to run | (1) 我想直接一键运行(选择0号GPU): docker run --rm -it --net=host --gpus "device=0" gpt-academic
3
+ # How to run | (2) 我想运行之前进容器做一些调整(选择1号GPU): docker run --rm -it --net=host --gpus "device=1" gpt-academic bash
4
 
5
  # 从NVIDIA源,从而支持显卡运行(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
6
  FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
 
14
  RUN $useProxyNetwork curl cip.cc
15
  RUN sed -i '$ d' /etc/proxychains.conf
16
  RUN sed -i '$ d' /etc/proxychains.conf
17
+ # 在这里填写主机的代理协议(用于从github拉取代码)
18
  RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf
19
  ARG useProxyNetwork=proxychains
20
  # # comment out above if you do not need proxy network | 如果不需要翻墙 - 从此行向上删除
 
22
 
23
  # use python3 as the system default python
24
  RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
25
+ # 下载pytorch
26
+ RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
27
  # 下载分支
28
  WORKDIR /gpt
29
  RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git
30
  WORKDIR /gpt/chatgpt_academic
31
  RUN $useProxyNetwork python3 -m pip install -r requirements.txt
32
  RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
33
+ RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_newbing.txt
34
 
35
  # 预热CHATGLM参数(非必要 可选步骤)
36
  RUN echo ' \n\
 
50
  # 可同时填写多个API-KEY,支持openai的key和api2d的key共存,用英文逗号分割,例如API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........"
51
  # LLM_MODEL 是选择初始的模型
52
  # LOCAL_MODEL_DEVICE 是选择chatglm等本地模型运行的设备,可选 cpu 和 cuda
53
+ # [说明: 以下内容与`config.py`一一对应,请查阅config.py来完成以下配置的填写]
54
  RUN echo ' \n\
55
  API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
56
  USE_PROXY = True \n\
docs/Dockerfile+JittorLLM ADDED
@@ -0,0 +1,59 @@
1
+ # How to build | 如何构建: docker build -t gpt-academic-jittor --network=host -f Dockerfile+JittorLLM .
2
+ # How to run | (1) 我想直接一键运行(选择0号GPU): docker run --rm -it --net=host --gpus "device=0" gpt-academic-jittor
3
+ # How to run | (2) 我想运行之前进容器做一些调整(选择1号GPU): docker run --rm -it --net=host --gpus "device=1" gpt-academic-jittor bash
4
+
5
+ # 从NVIDIA源,从而支持显卡运行(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
6
+ FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
7
+ ARG useProxyNetwork=''
8
+ RUN apt-get update
9
+ RUN apt-get install -y curl proxychains curl g++
10
+ RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
11
+
12
+ # 配置代理网络(构建Docker镜像时使用)
13
+ # # comment out below if you do not need proxy network | 如果不需要翻墙 - 从此行向下删除
14
+ RUN $useProxyNetwork curl cip.cc
15
+ RUN sed -i '$ d' /etc/proxychains.conf
16
+ RUN sed -i '$ d' /etc/proxychains.conf
17
+ # 在这里填写主机的代理协议(用于从github拉取代码)
18
+ RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf
19
+ ARG useProxyNetwork=proxychains
20
+ # # comment out above if you do not need proxy network | 如果不需要翻墙 - 从此行向上删除
21
+
22
+
23
+ # use python3 as the system default python
24
+ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
25
+ # 下载pytorch
26
+ RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
27
+ # 下载分支
28
+ WORKDIR /gpt
29
+ RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b jittor
30
+ WORKDIR /gpt/chatgpt_academic
31
+ RUN $useProxyNetwork python3 -m pip install -r requirements.txt
32
+ RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
33
+ RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_newbing.txt
34
+ RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I
35
+
36
+ # 下载JittorLLMs
37
+ RUN $useProxyNetwork git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llm/jittorllms
38
+
39
+ # 禁用缓存,确保更新代码
40
+ ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
41
+ RUN $useProxyNetwork git pull
42
+
43
+ # 预热Tiktoken模块
44
+ RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
45
+
46
+ # 为chatgpt-academic配置代理和API-KEY (非必要 可选步骤)
47
+ # 可同时填写多个API-KEY,支持openai的key和api2d的key共存,用英文逗号分割,例如API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........"
48
+ # LLM_MODEL 是选择初始的模型
49
+ # LOCAL_MODEL_DEVICE 是选择chatglm等本地模型运行的设备,可选 cpu 和 cuda
50
+ # [说明: 以下内容与`config.py`一一对应,请查阅config.py来完成以下配置的填写]
51
+ RUN echo ' \n\
52
+ API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
53
+ USE_PROXY = True \n\
54
+ LLM_MODEL = "chatglm" \n\
55
+ LOCAL_MODEL_DEVICE = "cuda" \n\
56
+ proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py
57
+
58
+ # 启动
59
+ CMD ["python3", "-u", "main.py"]
docs/GithubAction+ChatGLM+Moss ADDED
@@ -0,0 +1,30 @@
1
+
2
+ # 从NVIDIA源,从而支持显卡运行(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
3
+ FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
4
+ ARG useProxyNetwork=''
5
+ RUN apt-get update
6
+ RUN apt-get install -y curl proxychains curl gcc
7
+ RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
8
+
9
+
10
+ # use python3 as the system default python
11
+ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
12
+ # 下载pytorch
13
+ RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
14
+ # 下载分支
15
+ WORKDIR /gpt
16
+ RUN git clone https://github.com/binary-husky/chatgpt_academic.git
17
+ WORKDIR /gpt/chatgpt_academic
18
+ RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss
19
+ RUN python3 -m pip install -r requirements.txt
20
+ RUN python3 -m pip install -r request_llm/requirements_moss.txt
21
+ RUN python3 -m pip install -r request_llm/requirements_chatglm.txt
22
+ RUN python3 -m pip install -r request_llm/requirements_newbing.txt
23
+
24
+
25
+
26
+ # 预热Tiktoken模块
27
+ RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
28
+
29
+ # 启动
30
+ CMD ["python3", "-u", "main.py"]
docs/GithubAction+JittorLLMs ADDED
@@ -0,0 +1,34 @@
1
+ # 从NVIDIA源,从而支持显卡运行(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
2
+ FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
3
+ ARG useProxyNetwork=''
4
+ RUN apt-get update
5
+ RUN apt-get install -y curl proxychains curl g++
6
+ RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
7
+
8
+ # use python3 as the system default python
9
+ RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
10
+
11
+ # 下载pytorch
12
+ RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
13
+
14
+ # 下载分支
15
+ WORKDIR /gpt
16
+ RUN git clone https://github.com/binary-husky/chatgpt_academic.git -b jittor
17
+ WORKDIR /gpt/chatgpt_academic
18
+ RUN python3 -m pip install -r requirements.txt
19
+ RUN python3 -m pip install -r request_llm/requirements_chatglm.txt
20
+ RUN python3 -m pip install -r request_llm/requirements_newbing.txt
21
+ RUN python3 -m pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I
22
+
23
+ # 下载JittorLLMs
24
+ RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llm/jittorllms
25
+
26
+ # 禁用缓存,确保更新代码
27
+ ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
28
+ RUN git pull
29
+
30
+ # 预热Tiktoken模块
31
+ RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
32
+
33
+ # 启动
34
+ CMD ["python3", "-u", "main.py"]
docs/GithubAction+NoLocal ADDED
@@ -0,0 +1,20 @@
1
+ # 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM
2
+ # 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic-nolocal -f docs/Dockerfile+NoLocal .
3
+ # 如何运行: docker run --rm -it --net=host gpt-academic-nolocal
4
+ FROM python:3.11
5
+
6
+ # 指定路径
7
+ WORKDIR /gpt
8
+
9
+ # 装载项目文件
10
+ COPY . .
11
+
12
+ # 安装依赖
13
+ RUN pip3 install -r requirements.txt
14
+
15
+
16
+ # 可选步骤,用于预热模块
17
+ RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
18
+
19
+ # 启动
20
+ CMD ["python3", "-u", "main.py"]
docs/self_analysis.md CHANGED
@@ -157,7 +157,7 @@
157
 
158
  ## [22/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\解析项目源代码.py
159
 
160
- 这个程序文件实现了对一个源代码项目进行分析的功能。其中,函数`解析项目本身`、`解析一个Python项目`、`解析一个C项目的头文件`、`解析一个C项目`、`解析一个Java项目`和`解析一个Rect项目`分别用于解析不同类型的项目。函数`解析源代码新`实现了对每一个源代码文件的分析,并将分析结果汇总,同时还实现了分组和迭代处理,提高了效率。最后,函数`write_results_to_file`将所有分析结果写入文件。中间,还用到了`request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency`和`request_gpt_model_in_new_thread_with_ui_alive`来完成请求和响应,并用`update_ui`实时更新界面。
161
 
162
  ## [23/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\询问多个大语言模型.py
163
 
 
157
 
158
  ## [22/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\解析项目源代码.py
159
 
160
+ 这个程序文件实现了对一个源代码项目进行分析的功能。其中,函数`解析项目本身`、`解析一个Python项目`、`解析一个C项目的头文件`、`解析一个C项目`、`解析一个Java项目`和`解析一个前端项目`分别用于解析不同类型的项目。函数`解析源代码新`实现了对每一个源代码文件的分析,并将分析结果汇总,同时还实现了分组和迭代处理,提高了效率。最后,函数`write_results_to_file`将所有分析结果写入文件。中间,还用到了`request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency`和`request_gpt_model_in_new_thread_with_ui_alive`来完成请求和响应,并用`update_ui`实时更新界面。
161
 
162
  ## [23/31] 请对下面的程序文件做一个概述: H:\chatgpt_academic_resolve\crazy_functions\询问多个大语言模型.py
163
 
docs/test_markdown_format.py ADDED
@@ -0,0 +1,130 @@
1
+ sample = """
2
+ [1]: https://baike.baidu.com/item/%E8%B4%A8%E8%83%BD%E6%96%B9%E7%A8%8B/1884527 "质能方程(质能方程式)_百度百科"
3
+ [2]: https://www.zhihu.com/question/348249281 "如何理解质能方程 E=mc²? - 知乎"
4
+ [3]: https://zhuanlan.zhihu.com/p/32597385 "质能方程的推导与理解 - 知乎 - 知乎专栏"
5
+
6
+ 你好,这是必应。质能方程是描述质量与能量之间的当量关系的方程[^1^][1]。用tex格式,质能方程可以写成$$E=mc^2$$,其中$E$是能量,$m$是质量,$c$是光速[^2^][2] [^3^][3]。
7
+ """
8
+ import re
9
+
10
+ def preprocess_newbing_out(s):
11
+ pattern = r'\^(\d+)\^' # 匹配^数字^
12
+ pattern2 = r'\[(\d+)\]' # 匹配[数字]
13
+ sub = lambda m: '\['+m.group(1)+'\]' # 将匹配到的数字作为替换值
14
+ result = re.sub(pattern, sub, s) # 替换操作
15
+ if '[1]' in result:
16
+ result += '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>' + "<br/>".join([re.sub(pattern2, sub, r) for r in result.split('\n') if r.startswith('[')]) + '</small>'
17
+ return result
18
+
19
+
20
+ def close_up_code_segment_during_stream(gpt_reply):
21
+ """
22
+ 在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的```
23
+
24
+ Args:
25
+ gpt_reply (str): GPT模型返回的回复字符串。
26
+
27
+ Returns:
28
+ str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
29
+
30
+ """
31
+ if '```' not in gpt_reply:
32
+ return gpt_reply
33
+ if gpt_reply.endswith('```'):
34
+ return gpt_reply
35
+
36
+ # 排除了以上两个情况后,统计```标记的数量:若为奇数,说明有代码块尚未闭合
37
+ segments = gpt_reply.split('```')
38
+ n_mark = len(segments) - 1
39
+ if n_mark % 2 == 1:
40
+ # print('输出代码片段中!')
41
+ return gpt_reply+'\n```'
42
+ else:
43
+ return gpt_reply
44
+
45
+ import markdown
46
+ from latex2mathml.converter import convert as tex2mathml
47
+ from functools import wraps, lru_cache
48
+ def markdown_convertion(txt):
49
+ """
50
+ 将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
51
+ """
52
+ pre = '<div class="markdown-body">'
53
+ suf = '</div>'
54
+ if txt.startswith(pre) and txt.endswith(suf):
55
+ # print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
56
+ return txt # 已经被转化过,不需要再次转化
57
+
58
+ markdown_extension_configs = {
59
+ 'mdx_math': {
60
+ 'enable_dollar_delimiter': True,
61
+ 'use_gitlab_delimiters': False,
62
+ },
63
+ }
64
+ find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
65
+
66
+ def tex2mathml_catch_exception(content, *args, **kwargs):
67
+ try:
68
+ content = tex2mathml(content, *args, **kwargs)
69
+ except:
70
+ content = content
71
+ return content
72
+
73
+ def replace_math_no_render(match):
74
+ content = match.group(1)
75
+ if 'mode=display' in match.group(0):
76
+ content = content.replace('\n', '</br>')
77
+ return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
78
+ else:
79
+ return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
80
+
81
+ def replace_math_render(match):
82
+ content = match.group(1)
83
+ if 'mode=display' in match.group(0):
84
+ if '\\begin{aligned}' in content:
85
+ content = content.replace('\\begin{aligned}', '\\begin{array}')
86
+ content = content.replace('\\end{aligned}', '\\end{array}')
87
+ content = content.replace('&', ' ')
88
+ content = tex2mathml_catch_exception(content, display="block")
89
+ return content
90
+ else:
91
+ return tex2mathml_catch_exception(content)
92
+
93
+ def markdown_bug_hunt(content):
94
+ """
95
+ 解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
96
+ """
97
+ content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
98
+ content = content.replace('</script>\n</script>', '</script>')
99
+ return content
100
+
101
+
102
+ if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识
103
+ # convert everything to html format
104
+ split = markdown.markdown(text='---')
105
+ convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
106
+ convert_stage_1 = markdown_bug_hunt(convert_stage_1)
107
+ # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
108
+ # 1. convert to easy-to-copy tex (do not render math)
109
+ convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
110
+ # 2. convert to rendered equation
111
+ convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
112
+ # cat them together
113
+ return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
114
+ else:
115
+ return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
116
+
117
+
118
+ sample = preprocess_newbing_out(sample)
119
+ sample = close_up_code_segment_during_stream(sample)
120
+ sample = markdown_convertion(sample)
121
+ with open('tmp.html', 'w', encoding='utf8') as f:
122
+ f.write("""
123
+
124
+ <head>
125
+ <title>My Website</title>
126
+ <link rel="stylesheet" type="text/css" href="style.css">
127
+ </head>
128
+
129
+ """)
130
+ f.write(sample)
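close_up_code_segment_during_stream above repairs streamed replies that stop in the middle of a code fence: an odd count of ``` markers means a block is still open, so one closing fence is appended. A quick check of both branches, with illustrative strings:

# Quick check of both branches (strings are illustrative)
partial = "Here is code:\n```python\nprint('hi')"
assert close_up_code_segment_during_stream(partial).endswith("\n```")
complete = "Here is code:\n```python\nprint('hi')\n```"
assert close_up_code_segment_during_stream(complete) == complete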
docs/translate_english.json ADDED
The diff for this file is too large to render. See raw diff
 
docs/translate_japanese.json ADDED
The diff for this file is too large to render. See raw diff
 
docs/translate_traditionalchinese.json ADDED
@@ -0,0 +1,1515 @@
1
+ {
2
+ "print亮黄": "PrintBrightYellow",
3
+ "print亮绿": "PrintBrightGreen",
4
+ "print亮红": "PrintBrightRed",
5
+ "print红": "PrintRed",
6
+ "print绿": "PrintGreen",
7
+ "print黄": "PrintYellow",
8
+ "print蓝": "PrintBlue",
9
+ "print紫": "PrintPurple",
10
+ "print靛": "PrintIndigo",
11
+ "print亮蓝": "PrintBrightBlue",
12
+ "print亮紫": "PrintBrightPurple",
13
+ "print亮靛": "PrintBrightIndigo",
14
+ "读文章写摘要": "ReadArticleWriteSummary",
15
+ "批量生成函数注释": "BatchGenerateFunctionComments",
16
+ "生成函数注释": "GenerateFunctionComments",
17
+ "解析项目本身": "ParseProjectItself",
18
+ "解析项目源代码": "ParseProjectSourceCode",
19
+ "解析一个Python项目": "ParsePythonProject",
20
+ "解析一个C项目的头文件": "ParseCProjectHeaderFile",
21
+ "解析一个C项目": "ParseCProject",
22
+ "解析一个Rust项目": "ParseRustProject",
23
+ "解析一个Java项目": "ParseJavaProject",
24
+ "解析一个前端项目": "ParseAFrontEndProject",
25
+ "高阶功能模板函数": "HigherOrderFeatureTemplateFunction",
26
+ "高级功能函数模板": "AdvancedFeatureFunctionTemplate",
27
+ "全项目切换英文": "SwitchEntireProjectToEnglish",
28
+ "代码重写为全英文_多线程": "RewriteCodeToEnglishMultithreading",
29
+ "Latex英文润色": "LatexEnglishPolishing",
30
+ "Latex全文润色": "LatexWholeDocumentPolishing",
31
+ "同时问询": "InquireSimultaneously",
32
+ "询问多个大语言模型": "InquireMultipleLargeLanguageModels",
33
+ "解析一个Lua项目": "ParseALuaProject",
34
+ "解析一个CSharp项目": "ParseACSharpProject",
35
+ "总结word文档": "SummarizeWordDocument",
36
+ "解析ipynb文件": "ParseIpynbFile",
37
+ "解析JupyterNotebook": "ParseJupyterNotebook",
38
+ "对话历史存档": "ConversationHistoryArchive",
39
+ "载入对话历史存档": "LoadConversationHistoryArchive",
40
+ "删除所有本地对话历史记录": "DeleteAllLocalConversationHistoryRecords",
41
+ "Markdown英译中": "MarkdownEnglishToChinese",
42
+ "批量Markdown翻译": "BatchMarkdownTranslation",
43
+ "批量总结PDF文档": "BatchSummarizePDFDocuments",
44
+ "批量总结PDF文档pdfminer": "BatchSummarizePDFDocumentsPdfminer",
45
+ "批量翻译PDF文档": "BatchTranslatePDFDocuments",
46
+ "批量翻译PDF文档_多线程": "BatchTranslatePdfDocumentsMultithreaded",
47
+ "谷歌检索小助手": "GoogleSearchAssistant",
48
+ "理解PDF文档内容标准文件输入": "StandardFileInputForUnderstandingPdfDocumentContent",
49
+ "理解PDF文档内容": "UnderstandingPdfDocumentContent",
50
+ "Latex中文润色": "ChineseProofreadingInLatex",
51
+ "Latex中译英": "ChineseToEnglishTranslationInLatex",
52
+ "Latex全文翻译": "FullTextTranslationInLatex",
53
+ "Latex英译中": "EnglishToChineseTranslationInLatex",
54
+ "Markdown中译英": "ChineseToEnglishTranslationInMarkdown",
55
+ "下载arxiv论文并翻译摘要": "DownloadArxivPapersAndTranslateAbstract",
56
+ "下载arxiv论文翻译摘要": "DownloadArxivPapersTranslateAbstract",
57
+ "连接网络回答问题": "ConnectToInternetToAnswerQuestions",
58
+ "联网的ChatGPT": "ChatGPTConnectedToInternet",
59
+ "解析任意code项目": "ParsingAnyCodeProject",
60
+ "同时问询_指定模型": "InquiryWithSpecifiedModelSimultaneously",
61
+ "图片生成": "ImageGeneration",
62
+ "test_解析ipynb文件": "TestParsingIpynbFile",
63
+ "把字符太少的块清除为回车": "RemoveBlocksWithTooFewCharactersToNewline",
64
+ "清理多余的空行": "CleaningUpExtraBlankLines",
65
+ "合并小写开头的段落块": "MergeParagraphBlocksStartingWithLowerCase",
66
+ "多文件润色": "ProofreadingMultipleFiles",
67
+ "多文件翻译": "TranslationOfMultipleFiles",
68
+ "解析docx": "ParseDocx",
69
+ "解析PDF": "ParsePDF",
70
+ "解析Paper": "ParsePaper",
71
+ "ipynb解释": "IpynbInterpret",
72
+ "解析源代码新": "ParseSourceCodeNew",
73
+ "输入区": "輸入區",
74
+ "获取文章meta信息": "獲取文章meta信息",
75
+ "等待": "等待",
76
+ "不能正常加载MOSS的参数!": "無法正常加載MOSS的參數!",
77
+ "橙色": "橙色",
78
+ "窗口布局": "窗口佈局",
79
+ "需要安装pip install py7zr来解压7z文件": "需要安裝pip install py7zr來解壓7z文件",
80
+ "上下布局": "上下佈局",
81
+ "打开文件": "打開文件",
82
+ "可能需要分组处理": "可能需要分組處理",
83
+ "用tex格式": "用tex格式",
84
+ "按Shift+Enter换行": "按Shift+Enter換行",
85
+ "输入路径或上传压缩包": "輸入路徑或上傳壓縮包",
86
+ "翻译成地道的中文": "翻譯成地道的中文",
87
+ "上下文": "上下文",
88
+ "请耐心完成后再提交新问题": "請耐心完成後再提交新問題",
89
+ "可以直接修改对话界面内容": "可以直接修改對話界面內容",
90
+ "检测输入参数": "檢測輸入參數",
91
+ "也许会导致低配计算机卡死 ……": "也許會導致低配計算機卡死……",
92
+ "html格式": "html格式",
93
+ "不能识别的URL!": "無法識別的URL!",
94
+ "第2步": "第2步",
95
+ "若上传压缩文件": "若上傳壓縮文件",
96
+ "多线程润色开始": "多線程潤色開始",
97
+ "警告!API_URL配置选项将被弃用": "警告!API_URL配置選項將被棄用",
98
+ "非OpenAI官方接口的出现这样的报错": "非OpenAI官方接口出現這樣的錯誤",
99
+ "如果没找到任何文件": "如果沒找到任何文件",
100
+ "生成一份任务执行报告": "生成一份任務執行報告",
101
+ "而cl**h 的默认本地协议是http": "而cl**h的默認本地協議是http",
102
+ "gpt_replying_buffer也写完了": "gpt_replying_buffer也寫完了",
103
+ "是本次输出": "是本次輸出",
104
+ "展现在报告中的输入": "展現在報告中的輸入",
105
+ "和端口": "和端口",
106
+ "Pay-as-you-go users的限制是每分钟3500次": "Pay-as-you-go用戶的限制是每分鐘3500次",
107
+ "既可以写": "既可以寫",
108
+ "输入清除键": "輸入清除鍵",
109
+ "gpt模型参数": "gpt模型參數",
110
+ "直接清除历史": "直接清除歷史",
111
+ "当前模型": "當前模型",
112
+ ";5、中文摘要翻译": ";5、中文摘要翻譯",
113
+ "将markdown转化为好看的html": "將markdown轉換為好看的html",
114
+ "谷歌学术检索助手": "谷歌學術檢索助手",
115
+ "后语": "後語",
116
+ "请确认是否满足您的需要": "請確認是否滿足您的需要",
117
+ "本地路径": "本地路徑",
118
+ "sk-此处填API密钥": "sk-此處填API密鑰",
119
+ "正常结束": "正常結束",
120
+ "排除了以上两个情况": "排除了以上兩個情況",
121
+ "把gradio的运行地址更改到指定的二次路径上": "將gradio的運行地址更改到指定的二次路徑上",
122
+ "配置其Path环境变量": "配置其Path環境變量",
123
+ "的第": "的第",
124
+ "减少重复": "減少重複",
125
+ "如果超过期限没有喂狗": "如果超過期限沒有餵狗",
126
+ "函数的说明请见 request_llm/bridge_all.py": "函數的說明請見 request_llm/bridge_all.py",
127
+ "第7步": "第7步",
128
+ "说": "說",
129
+ "中途接收可能的终止指令": "中途接收可能的終止指令",
130
+ "第5次尝试": "第5次嘗試",
131
+ "gradio可用颜色列表": "gradio可用顏色列表",
132
+ "返回的结果是": "返回的結果是",
133
+ "出现的所有文章": "所有出現的文章",
134
+ "更换LLM模型/请求源": "更換LLM模型/請求源",
135
+ "调用NewBing时": "調用NewBing時",
136
+ "AutoGPT是什么": "AutoGPT是什麼",
137
+ "则换行符更有可能表示段落分隔": "則換行符更有可能表示段落分隔",
138
+ "接收文件后与chatbot的互动": "接收文件後與chatbot的互動",
139
+ "每个子任务展现在报告中的输入": "每個子任務展現在報告中的輸入",
140
+ "按钮见functional.py": "按鈕見functional.py",
141
+ "地址🚀": "地址🚀",
142
+ "将长文本分离开来": "將長文本分離開來",
143
+ "ChatGLM消耗大量的内存": "ChatGLM消耗大量的內存",
144
+ "使用 lru缓存 加快转换速度": "使用lru緩存加快轉換速度",
145
+ "屏蔽掉 chatglm的多线程": "屏蔽掉chatglm的多線程",
146
+ "不起实际作用": "不起實際作用",
147
+ "先寻找到解压的文件夹路径": "先尋找到解壓的文件夾路徑",
148
+ "观察窗": "觀察窗",
149
+ "请解释以下代码": "請解釋以下代碼",
150
+ "使用中文回答我的问题": "使用中文回答我的問題",
151
+ "备份一个文件": "備份一個文件",
152
+ "未知": "未知",
153
+ "如.md": "#",
154
+ "**输入参数说明**": "#",
155
+ "如果这裡拋出異常": "#",
156
+ "多線程操作已經開始": "#",
157
+ "備份和下載": "#",
158
+ "新版本可用": "#",
159
+ "將要忽略匹配的文件後綴": "#",
160
+ "可調節線程池的大小避免openai的流量限制錯誤": "#",
161
+ "使用Unsplash API": "#",
162
+ "ChatGPT綜合": "#",
163
+ "從摘要中提取高價值信息": "#",
164
+ "借助此參數": "#",
165
+ "知乎": "#",
166
+ "其他錯誤": "#",
167
+ "退出": "#",
168
+ "對話歷史寫入": "#",
169
+ "問詢記錄": "#",
170
+ "依次訪問網頁": "#",
171
+ "NewBing響應異常": "#",
172
+ "jittorllms尚未加載": "#",
173
+ "等待NewBing响应": "等待NewBing回應",
174
+ "找不到任何CSharp文件": "找不到任何CSharp檔案",
175
+ "插件demo": "插件範例",
176
+ "1. 把input的余量留出来": "1. 留出input的餘量",
177
+ "如果文章被切分了": "如果文章被切分了",
178
+ "或者您没有获得体验资格": "或者您沒有獲得體驗資格",
179
+ "修正值": "修正值",
180
+ "正在重试": "正在重試",
181
+ "展示分割效果": "展示分割效果",
182
+ "已禁用": "已禁用",
183
+ "抽取摘要": "抽取摘要",
184
+ "下载完成": "下載完成",
185
+ "无法连接到该网页": "無法連接到該網頁",
186
+ "根据以上的对话": "根據以上的對話",
187
+ "第1次尝试": "第1次嘗試",
188
+ "我们用最暴力的方法切割": "我們用最暴力的方法切割",
189
+ "回滚代码到原始的浏览器打开函数": "回滾程式碼到原始的瀏覽器��啟函數",
190
+ "先上传存档或输入路径": "先上傳存檔或輸入路徑",
191
+ "避免代理网络产生意外污染": "避免代理網路產生意外污染",
192
+ "发送图片时": "傳送圖片時",
193
+ "第二步": "第二步",
194
+ "完成": "完成",
195
+ "搜索页面中": "搜索頁面中",
196
+ "下载中": "下載中",
197
+ "重试一次": "重試一次",
198
+ "历史上的今天": "歷史上的今天",
199
+ "2. 替换跨行的连词": "2. 替換跨行的連詞",
200
+ "协议": "協議",
201
+ "批量ChineseToEnglishTranslationInMarkdown": "批量Markdown中文轉英文翻譯",
202
+ "也可以直接是": "也可以直接是",
203
+ "插件模型的参数": "插件模型的參數",
204
+ "也可以根据之前的内容长度来判断段落是否已经足够长": "也可以根據之前的內容長度來判斷段落是否已經足夠長",
205
+ "引入一个有cookie的chatbot": "引入一個有cookie的聊天機器人",
206
+ "任何文件": "任何文件",
207
+ "代码直接生效": "代碼直接生效",
208
+ "高级实验性功能模块调用": "高級實驗性功能模塊調用",
209
+ "修改函数插件代码后": "修改函數插件代碼後",
210
+ "按Enter提交": "按Enter提交",
211
+ "天蓝色": "天藍色",
212
+ "子任务失败时的重试次数": "子任務失敗時的重試次數",
213
+ "格式须是": "請輸入正確的格式",
214
+ "调用主体": "調用主體",
215
+ "有些文章的正文部分字体大小不是100%统一的": "有些文章正文中字體大小不統一",
216
+ "线程": "執行緒",
217
+ "是否一键更新代码": "是否一鍵更新程式碼",
218
+ "除了基础的pip依赖以外": "除了基礎的pip依賴外",
219
+ "紫色": "紫色",
220
+ "同样支持多线程": "同樣支援多執行緒",
221
+ "这个中文的句号是故意的": "這個中文句號是故意的",
222
+ "获取所有文章的标题和作者": "取得所有文章的標題和作者",
223
+ "Incorrect API key. OpenAI以提供了不正确的API_KEY为由": "API金鑰錯誤。OpenAI提供了錯誤的API_KEY",
224
+ "绿色": "綠色",
225
+ "异常": "異常",
226
+ "pip install pywin32 用于doc格式": "pip install pywin32 用於doc格式",
227
+ "也可以写": "也可以寫",
228
+ "请对下面的文章片段用中文做一个概述": "請用中文對下面的文章片段做一個概述",
229
+ "上下文管理器是一种Python对象": "上下文管理器是一種Python物件",
230
+ "处理文件的上传": "處理檔案的上傳",
231
+ "尝试Prompt": "嘗試Prompt",
232
+ "检查USE_PROXY选项是否修改": "檢查USE_PROXY選項是否修改",
233
+ "改为True应用代理": "將True更改為應用代理",
234
+ "3. 如果余量太小了": "如果餘量太小",
235
+ "老旧的Demo": "舊版Demo",
236
+ "第一部分": "第一部分",
237
+ "插件参数区": "插件參數區",
238
+ "历史中哪些事件发生在": "歷史中哪些事件發生在",
239
+ "现将您的现有配置移动至config_private.py以防止配置丢失": "現在將您現有的配置移動到config_private.py以防止配置丟失",
240
+ "当你想发送一张照片时": "當你想發送一張照片時",
241
+ "接下来请将以下代码中包含的所有中文转化为英文": "接下來請將以下代碼中包含的所有中文轉化為英文",
242
+ "i_say=真正给chatgpt的提问": "i_say=真正給chatgpt的提問",
243
+ "解析整个C++项目头文件": "解析整個C++項目頭文件",
244
+ "需要安装pip install rarfile来解压rar文件": "需要安裝pip install rarfile來解壓rar文件",
245
+ "把已经获取的数据显示出去": "顯示已經獲取的數據",
246
+ "红色": "紅色",
247
+ "异步任务结束": "異步任務結束",
248
+ "进行学术解答": "進行學術解答",
249
+ "config_private.py放自己的秘密如API和代理网址": "config_private.py放自己的秘密如API和代理網址",
250
+ "学术中英互译": "學術中英互譯",
251
+ "选择处理": "選擇處理",
252
+ "利用以上信息": "利用以上信息",
253
+ "暂时先这样顶一下": "暫時先這樣頂一下",
254
+ "如果中文效果不理想": "如果中文效果不理想",
255
+ "常见协议无非socks5h/http": "常見協議無非socks5h/http",
256
+ "返回文本内容": "返回文本內容",
257
+ "用于重组输入参数": "用於重組輸入參數",
258
+ "第8步": "第8步",
259
+ "可能处于折叠状态": "可能處於折疊狀態",
260
+ "重置": "重置",
261
+ "清除": "清除",
262
+ "放到每个子线程中分别执行": "放到每個子線程中分別執行",
263
+ "载入对话历史文件": "載入對話歷史文件",
264
+ "列举两条并发送相关图片": "列舉兩條並發送相關圖片",
265
+ "然后重试": "然後重試",
266
+ "重新URL重新定向": "重新URL重新定向",
267
+ "内部函数通过使用importlib模块的reload函数和inspect模块的getmodule函数来重新加载并获取函数模块": "內部函數通過使用importlib模塊的reload函數和inspect模塊的getmodule函數來重新加載並獲取函數模塊",
268
+ "第一层列表是子任务分解": "第一層列表是子任務分解",
269
+ "为发送请求做准备": "為發送請求做準備",
270
+ "暂时没有用武之地": "暫時沒有用武之地",
271
+ "并对文件中的所有函数生成注释": "並對文件中的所有函數生成註釋",
272
+ "分解连字": "分解連字",
273
+ "不输入文件名": "不輸入檔案名稱",
274
+ "并相应地进行替换": "並相應地進行替換",
275
+ "在实验过程中发现调用predict_no_ui处理长文档时": "在實驗過程中發現調用predict_no_ui處理長文檔時",
276
+ "提取文本块主字体": "提取文本塊主字體",
277
+ "temperature是chatGPT的内部调优参数": "temperature是chatGPT的內部調優參數",
278
+ "没办法了": "沒辦法了",
279
+ "获取正文主字体": "獲取正文主字體",
280
+ "看门狗": "看門狗",
281
+ "当前版本": "當前版本",
282
+ "这个函数是用来获取指定目录下所有指定类型": "這個函數是用來獲取指定目錄下所有指定類型",
283
+ "api_key已导入": "api_key已導入",
284
+ "找不到任何.tex或.pdf文件": "找不到任何.tex或.pdf檔案",
285
+ "You exceeded your current quota. OpenAI以账户额度不足为由": "您超出了當前配額。OpenAI以帳戶額度不足為由",
286
+ "自动更新程序": "自動更新程式",
287
+ "并且不要有反斜线": "並且不要有反斜線",
288
+ "你必须逐个文献进行处理": "您必須逐個文獻進行處理",
289
+ "本地文件地址": "本地檔案地址",
290
+ "提取精炼信息": "提取精煉資訊",
291
+ "设置用户名和密码": "設置使用者名稱和密碼",
292
+ "请不吝PR!": "請不吝PR!",
293
+ "通过把連字": "通過將連字",
294
+ "文件路徑列表": "檔案路徑清單",
295
+ "判定為數據流的結束": "判定為資料流的結束",
296
+ "參數": "參數",
297
+ "避免不小心傳github被別人看到": "避免不小心傳到github被別人看到",
298
+ "記錄刪除註釋後的文本": "記錄刪除註釋後的文字",
299
+ "比正文字體小": "比正文字體小",
300
+ "上傳本地文件可供紅色函數插件調用": "上傳本地文件供紅色函數插件調用",
301
+ "生成圖像": "生成圖像",
302
+ "追加歷史": "追加歷史",
303
+ "網絡代理狀態": "網路代理狀態",
304
+ "不需要再次轉化": "不需要再次轉換",
305
+ "帶超時倒計時": "帶有超時倒數計時",
306
+ "保存當前對話": "儲存目前對話",
307
+ "等待響應": "等待回應",
308
+ "依賴檢測通過": "依賴檢查通過",
309
+ "如果要使用ChatGLM": "如果要使用ChatGLM",
310
+ "對IPynb文件進行解析": "對IPynb檔案進行解析",
311
+ "先切換模型到openai或api2d": "先切換模型到openai或api2d",
312
+ "塊元提取": "區塊元素提取",
313
+ "调用路径参数已自动修正到": "調用路徑參數已自動修正到",
314
+ "且下一个字符为大写字母": "且下一個字符為大寫字母",
315
+ "无": "無",
316
+ "$c$是光速": "$c$是光速",
317
+ "发送请求到OpenAI后": "發送請求到OpenAI後",
318
+ "您也可以选择删除此行警告": "您也可以選擇刪除此行警告",
319
+ "i_say_show_user=给用户看的提问": "i_say_show_user=給用戶看的提問",
320
+ "Endpoint 重定向": "Endpoint 重定向",
321
+ "基础功能区": "基礎功能區",
322
+ "根据以上你自己的分析": "根據以上你自己的分析",
323
+ "以上文件将被作为输入参数": "以上文件將被作為輸入參數",
324
+ "已完成": "已完成",
325
+ "第2次尝试": "第2次嘗試",
326
+ "若输入0": "若輸入0",
327
+ "自动缩减文本": "自動縮減文本",
328
+ "顺利完成": "順利完成",
329
+ "收到": "收到",
330
+ "打开浏览器": "打開瀏覽器",
331
+ "第5步": "第5步",
332
+ "Free trial users的限制是每分钟3次": "Free trial users的限制是每分鐘3次",
333
+ "请用markdown格式输出": "請用 Markdown 格式輸出",
334
+ "模仿ChatPDF": "模仿 ChatPDF",
335
+ "等待多久判定为超时": "等待多久判定為超時",
336
+ "/gpt_log/总结论文-": "/gpt_log/總結論文-",
337
+ "请结合互联网信息回答以下问题": "請結合互聯網信息回答以下問題",
338
+ "IP查询频率受限": "IP查詢頻率受限",
339
+ "高级参数输入区的显示提示": "高級參數輸入區的顯示提示",
340
+ "的高级参数说明": "的高級參數說明",
341
+ "默认开启": "默認開啟",
342
+ "为实现更多强大的功能做基础": "為實現更多強大的功能做基礎",
343
+ "中文学术润色": "中文學術潤色",
344
+ "注意这里的历史记录被替代了": "注意這裡的歷史記錄被替代了",
345
+ "子线程任务": "子線程任務",
346
+ "个": "個",
347
+ "正在加载tokenizer": "正在加載 tokenizer",
348
+ "生成http请求": "生成 HTTP 請求",
349
+ "从而避免解析压缩文件": "從而避免解析壓縮文件",
350
+ "加载参数": "加載參數",
351
+ "由于输入长度限制": "由於輸入長度限制",
352
+ "如果直接在海外服务器部署": "如果直接在海外伺服器部署",
353
+ "你提供了错误的API_KEY": "你提供了錯誤的API_KEY",
354
+ "history 是之前的对话列表": "history 是之前的對話列表",
355
+ "实现更换API_URL的作用": "實現更換API_URL的作用",
356
+ "Json解析不合常规": "Json解析不合常規",
357
+ "函数插件-下拉菜单与随变按钮的互动": "函數插件-下拉菜單與隨變按鈕的互動",
358
+ "则先将公式转换为HTML格式": "則先將公式轉換為HTML格式",
359
+ "1. 临时解决方案": "1. 臨時解決方案",
360
+ "如1812.10695": "如1812.10695",
361
+ "最后用中文翻译摘要部分": "最後用中文翻譯摘要部分",
362
+ "MOSS响应异常": "MOSS響應異常",
363
+ "读取pdf文件": "讀取pdf文件",
364
+ "重试的次数限制": "重試的次數限制",
365
+ "手动指定询问哪些模型": "手動指定詢問哪些模型",
366
+ "情况会好转": "情況會好轉",
367
+ "超过512个": "超過512個",
368
+ "多线": "多線",
369
+ "底部输入区": "底部輸入區",
370
+ "合并小写字母开头的段落块并替换为空格": "合併小寫字母開頭的段落塊並替換為空格",
371
+ "暗色主题": "暗色主題",
372
+ "提高限制请查询": "提高限制請查詢",
373
+ "您还需要运行": "您還需要執行",
374
+ "将双空行": "將雙空行",
375
+ "请削减单次输入的文本量": "請減少單次輸入的文本量",
376
+ "提高语法、清晰度和整体可读性": "提高語法、清晰度和整體可讀性",
377
+ "删除其中的所有注释": "刪除其中的所有註釋",
378
+ "列表长度为子任务的数量": "列表長度為子任務的數量",
379
+ "直接在输入区键入api_key": "直接在輸入區鍵入api_key",
380
+ "方法会在代码块被执行前被调用": "方法會在代碼塊被執行前被調用",
381
+ "懂的都懂": "懂的都懂",
382
+ "加一个live2d装饰": "加一個live2d裝飾",
383
+ "请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分": "請從中提取出“標題”、“收錄會議或期刊”、“作者”、“摘要”、“編號”、“作者郵箱”這六個部分",
384
+ "聊天历史": "聊天歷史",
385
+ "将插件中出的所有问题显示在界面上": "將插件中出的所有問題顯示在界面上",
386
+ "每个子任务的输入": "每個子任務的輸入",
387
+ "yield一次以刷新前端页面": "yield一次以刷新前端頁面",
388
+ "不能自定义字体和颜色": "不能自定義字體和顏色",
389
+ "如果本地使用不建议加这个": "如果本地使用不建議加這個",
390
+ "例如chatglm&gpt-3.5-turbo&api2d-gpt-4": "例如chatglm&gpt-3.5-turbo&api2d-gpt-4",
391
+ "尝试": "嘗試",
392
+ "什么都没有": "什麼都沒有",
393
+ "代理设置": "代理設置",
394
+ "请求处理结束": "請求處理結束",
395
+ "将结果写入markdown文件中": "將結果寫入markdown文件中",
396
+ "experiment等": "實驗等",
397
+ "添加一个萌萌的看板娘": "添加一個萌萌的看板娘",
398
+ "现在": "現在",
399
+ "当前软件运行的端口号": "當前軟件運行的端口號",
400
+ "第n组插件": "第n組插件",
401
+ "不受git管控": "不受git管控",
402
+ "基础功能区的回调函数注册": "基礎功能區的回調函數註冊",
403
+ "句子结束标志": "句子結束標誌",
404
+ "GPT参数": "GPT參數",
405
+ "按输入的匹配模式寻找上传的非压缩文件和已解压的文件": "按輸入的匹配模式尋找上傳的非壓縮文件和已解壓的文件",
406
+ "函数插件贡献者": "函數插件貢獻者",
407
+ "用户提示": "用戶提示",
408
+ "此版本使用pdfminer插件": "此版本使用pdfminer插件",
409
+ "如果换行符前为句子结束标志": "如果換行符前為句子結束標誌",
410
+ "在gpt输出代码的中途": "在gpt輸出代碼的中途",
411
+ "中转网址预览": "中轉網址預覽",
412
+ "自动截断": "自動截斷",
413
+ "当無法用標點、空行分割時": "當無法用標點、空行分割時",
414
+ "意外Json結構": "意外的Json結構",
415
+ "需要讀取和清理文本的pdf文件路徑": "需要讀取和清理文本的pdf文件路徑",
416
+ "HotReload的裝飾器函數": "HotReload的裝飾器函數",
417
+ "chatGPT 分析報告": "chatGPT 分析報告",
418
+ "如參考文獻、腳註、圖註等": "如參考文獻、腳註、圖註等",
419
+ "的api-key": "的api-key",
420
+ "第二組插件": "第二組插件",
421
+ "當前代理可用性": "當前代理可用性",
422
+ "列表遞歸接龍": "列表遞歸接龍",
423
+ "這個bug沒找到觸發條件": "這個bug沒找到觸發條件",
424
+ "喚起高級參數輸入區": "喚起高級參數輸入區",
425
+ "但大部分場合下並不需要修改": "但大部分場合下並不需要修改",
426
+ "盡量是完整的一個section": "盡量是完整的一個section",
427
+ "如果OpenAI不響應": "如果OpenAI不響應",
428
+ "等文本特殊符號轉換為其基本形式來對文本進行歸一化處理": "等文本特殊符號轉換為其基本形式來對文本進行歸一化處理",
429
+ "你的回答必須簡單明了": "你的回答必須簡單明了",
430
+ "對話歷史文件損壞��": "對話歷史文件損壞!",
431
+ "每一塊": "每一塊",
432
+ "如果某個子任務出錯": "如果某個子任務出錯",
433
+ "切分和重新整合": "切分和重新整合",
434
+ "Token限制下的截断与处理": "Token限制下的截斷與處理",
435
+ "仅支持Win平台": "僅支持Win平臺",
436
+ "并行任务数量限制": "並行任務數量限制",
437
+ "已重置": "已重置",
438
+ "如果要使用Newbing": "如果要使用Newbing",
439
+ "前言": "前言",
440
+ "理解PDF论文内容": "理解PDF論文內容",
441
+ "如果有的话": "如果有的話",
442
+ "功能区显示开关与功能区的互动": "功能區顯示開關與功能區的互動",
443
+ "前者API2D的": "前者API2D的",
444
+ "如果要使用MOSS": "如果要使用MOSS",
445
+ "源文件太多": "源文件太多",
446
+ "ChatGLM尚未加载": "ChatGLM尚未加載",
447
+ "不可高于3": "不可高於3",
448
+ "运行方法 python crazy_functions/crazy_functions_test.py": "運行方法 python crazy_functions/crazy_functions_test.py",
449
+ "清除历史": "清除歷史",
450
+ "如果要使用jittorllms": "如果要使用jittorllms",
451
+ "更换模型 & SysPrompt & 交互界面布局": "更換模型 & SysPrompt & 交互界面布局",
452
+ "是之前的对话列表": "是之前的對話列表",
453
+ "开始了吗": "開始了嗎",
454
+ "输入": "輸入",
455
+ "打开你的*学*网软件查看代理的协议": "打開你的*學*網軟件查看代理的協議",
456
+ "默认False": "默認False",
457
+ "获取页面上的文本信息": "獲取頁面上的文本信息",
458
+ "第一页清理后的文本内容列表": "第一頁清理後的文本內容列表",
459
+ "并定义了一个名为decorated的内部函数": "並定義了一個名為decorated的內部函數",
460
+ "你是一个学术翻译": "你是一個學術翻譯",
461
+ "OpenAI拒绝了请求": "OpenAI拒絕了請求",
462
+ "提示": "提示",
463
+ "返回重试": "返回重試",
464
+ "以下“红颜色”标识的函数插件需从输入区读取路径作为参数": "以下“紅顏色”標識的函數插件需從輸入區讀取路徑作為參數",
465
+ "这个函数用stream的方式解决这个问题": "這個函數用stream的方式解決這個問題",
466
+ "ChatGPT 学术优化": "ChatGPT 學術優化",
467
+ "去除短块": "去除短塊",
468
+ "第一组插件": "第一組插件",
469
+ "这是什么": "這是什麼",
470
+ "在传递chatbot的过程中不要将其丢弃": "在傳遞chatbot的過程中不要將其丟棄",
471
+ "下载PDF文档": "下載PDF文檔",
472
+ "以下是信息源": "以下是信息源",
473
+ "本组文件为": "本組檔案為",
474
+ "更新函数代码": "更新函數代碼",
475
+ "解析的结果如下": "解析的結果如下",
476
+ "逻辑较乱": "邏輯較亂",
477
+ "存入": "存入",
478
+ "具备完备的交互功能": "具備完備的交互功能",
479
+ "安装jittorllms依赖后将完全破坏现有的pytorch环境": "安裝jittorllms依賴後將完全破壞現有的pytorch環境",
480
+ "看门狗的耐心": "看門狗的耐心",
481
+ "点击展开“文件上传区”": "點擊展開“文件上傳區”",
482
+ "翻译摘要等": "翻譯摘要等",
483
+ "返回值": "返回值",
484
+ "默认允许多少路线程同时访问OpenAI": "默認允許多少路線程同時訪問OpenAI",
485
+ "这是第": "這是第",
486
+ "把本项目源代码切换成全英文": "把本項目源代碼切換成全英文",
487
+ "找不到任何html文件": "找不到任何html文件",
488
+ "假如重启失败": "假如重啟失敗",
489
+ "感谢热情的": "感謝熱情的",
490
+ "您若希望分享新的功能模组": "您若希望分享新的功能模組",
491
+ "并在新模块中重新加载函数": "並在新模塊中重新加載函數",
492
+ "则会在溢出时暴力截断": "則會在溢出時暴力截斷",
493
+ "源码自译解": "原始碼自譯解",
494
+ "开始正式执行任务": "開始正式執行任務",
495
+ "ChatGLM响应异常": "ChatGLM響應異常",
496
+ "用户界面对话窗口句柄": "用戶界面對話窗口句柄",
497
+ "左右布局": "左右佈局",
498
+ "后面两句是": "後面兩句是",
499
+ "可同时填写多个API-KEY": "可同時填寫多個API-KEY",
500
+ "对各个llm模型进行单元测试": "對各個llm模型進行單元測試",
501
+ "为了更好的效果": "為了更好的效果",
502
+ "jittorllms 没有 sys_prompt 接口": "jittorllms沒有sys_prompt接口",
503
+ "直接取出来": "直接取出來",
504
+ "不具备多线程能力的函数": "不具備多線程能力的函數",
505
+ "单行 + 字体大": "單行+字體大",
506
+ "正在分析一个源代码项目": "正在分析一個源代碼項目",
507
+ "直接退出": "直接退出",
508
+ "稍后可能需要再试一次": "稍後可能需要再試一次",
509
+ "开始重试": "開始重試",
510
+ "没有 sys_prompt 接口": "沒有sys_prompt接口",
511
+ "只保留文件名节省token": "只保留文件名節省token",
512
+ "肯定已经都结束了": "肯定已經都結束了",
513
+ "用&符號分隔": "&",
514
+ "但本地存儲了以下歷史文件": "以下是��地儲存的歷史文件清單",
515
+ "對全文進行概括": "全文概述",
516
+ "以下是一篇學術論文的基礎信息": "以下是學術論文的基本信息",
517
+ "正在提取摘要並下載PDF文檔……": "正在提取摘要並下載PDF文件……",
518
+ "1. 對原始文本進行歸一化處理": "1. 正規化原始文本",
519
+ "問題": "問題",
520
+ "用於基礎的對話功能": "基本對話功能",
521
+ "獲取設置": "獲取設置",
522
+ "如果缺少依賴": "如果缺少依賴項",
523
+ "第6步": "第6步",
524
+ "處理markdown文本格式的轉變": "處理Markdown文本格式轉換",
525
+ "功能、貢獻者": "功能、貢獻者",
526
+ "中文Latex項目全文潤色": "中文LaTeX項目全文潤色",
527
+ "等待newbing回復的片段": "等待newbing回復的片段",
528
+ "寫入文件": "寫入文件",
529
+ "下載pdf文件未成功": "下載PDF文件失敗",
530
+ "將生成的報告自動投射到文件上傳區": "將生成的報告自動上傳到文件區",
531
+ "函數插件作者": "函數插件作者",
532
+ "將要匹配的模式": "將要匹配的模式",
533
+ "所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log": "所有詢問記錄將自動保存在本地目錄./gpt_log/chat_secrets.log",
534
+ "正在分析一个项目的源代码": "正在分析一個專案的源代碼",
535
+ "使每个段落之间有两个换行符分隔": "使每個段落之間有兩個換行符分隔",
536
+ "并在被装饰的函数上执行": "並在被裝飾的函數上執行",
537
+ "更新完成": "更新完成",
538
+ "请先把模型切换至gpt-xxxx或者api2d-xxxx": "請先把模型切換至gpt-xxxx或者api2d-xxxx",
539
+ "结果写入文件": "結果寫入文件",
540
+ "在执行过程中遭遇问题": "在執行過程中遭遇問題",
541
+ "找不到任何文件": "找不到任何文件",
542
+ "给gpt的静默提醒": "給gpt的靜默提醒",
543
+ "远程返回错误": "遠程返回錯誤",
544
+ "例如\\section": "例如\\section",
545
+ "该函数详细注释已添加": "該函數詳細注釋已添加",
546
+ "对文本进行归一化处理": "對文本進行歸一化處理",
547
+ "注意目前不能多人同时调用NewBing接口": "注意目前不能多人同時調用NewBing接口",
548
+ "来保留函数的元信息": "來保留函數的元信息",
549
+ "一般是文本过长": "一般是文本過長",
550
+ "切割PDF": "切割PDF",
551
+ "开始下一个循环": "開始下一個循環",
552
+ "正在开始汇总": "正在開始匯總",
553
+ "建议使用docker环境!": "建議使用docker環境!",
554
+ "质能方程是描述质量与能量之间的当量关系的方程": "質能方程是描述質量與能量之間的當量關係的方程",
555
+ "子进程执行": "子進程執行",
556
+ "清理后的文本内容字符串": "清理後的文本內容字串",
557
+ "石板色": "石板色",
558
+ "Bad forward key. API2D账户额度不足": "Bad forward key. API2D帳戶額度不足",
559
+ "摘要在 .gs_rs 中的文本": "摘要在 .gs_rs 中的文本",
560
+ "请复制并转到以下URL": "請複製並轉到以下URL",
561
+ "然后用for+append循环重新赋值": "然後用for+append循環重新賦值",
562
+ "文章极长": "文章極長",
563
+ "请从数据中提取信息": "請從數據中提取信息",
564
+ "为了安全而隐藏绝对地址": "為了安全而隱藏絕對地址",
565
+ "OpenAI绑了信用卡的用户可以填 16 或者更高": "OpenAI綁了信用卡的用戶可以填 16 或者更高",
566
+ "gpt4现在只对申请成功的人开放": "gpt4現在只對申請成功的人開放",
567
+ "问号": "問號",
568
+ "并合并为一个字符串": "並合併為一個字串",
569
+ "文件上传区": "文件上傳區",
570
+ "这个函数运行在主进程": "這個函數運行在主進程",
571
+ "执行中": "執行中",
572
+ "修改函数插件后": "修改函數插件後",
573
+ "请你阅读以下学术论文相关的材料": "請你閱讀以下學術論文相關的材料",
574
+ "加载需要一段时间": "加載需要一段時間",
575
+ "单线程": "單線程",
576
+ "5s之后重启": "5秒後重啟",
577
+ "文件名是": "文件名是",
578
+ "主进程执行": "主進程執行",
579
+ "如何理解传奇?": "如何理解傳奇?",
580
+ "解析整个Java项目": "解析整個Java項目",
581
+ "已成功": "已成功",
582
+ "该函数面向希望实现更多有趣功能的开发者": "該函數面向希望實現更多有趣功能的開發者",
583
+ "代理所在地": "代理所在地",
584
+ "解析Jupyter Notebook文件": "解析Jupyter Notebook文件",
585
+ "观测窗": "觀測窗",
586
+ "更好的UI视觉效果": "更好的UI視覺效果",
587
+ "在此处替换您要搜索的关键词": "在此處替換您要搜索的關鍵詞",
588
+ "Token溢出": "Token溢出",
589
+ "这段代码来源 https": "這段代碼來源 https",
590
+ "请求超时": "請求超時",
591
+ "已经被转化过": "已經被轉化過",
592
+ "LLM_MODEL 格式不正确!": "LLM_MODEL 格式不正確!",
593
+ "先输入问题": "請輸入問題",
594
+ "灰色": "灰色",
595
+ "锌色": "鋅色",
596
+ "里面包含以指定类型为后缀名的所有文件的绝对路径": "包含指定類型後綴名的所有文件的絕對路徑",
597
+ "实现插件的热更新": "實現插件的熱更新",
598
+ "请对下面的文章片段用中文做概述": "請用中文概述下面的文章片段",
599
+ "如果需要在二级路径下运行": "如果需要在二級路徑下運行",
600
+ "的分析如下": "的分析如下",
601
+ "但端口号都应该在最显眼的位置上": "但端口號都應該在最顯眼的位置上",
602
+ "当输入部分的token占比小于限制的3/4时": "當輸入部分的token占比小於限制的3/4時",
603
+ "第一次运行": "第一次運行",
604
+ "失败了": "失敗了",
605
+ "如果包含数学公式": "如果包含數學公式",
606
+ "需要配合修改main.py才能生效!": "需要配合修改main.py才能生效!",
607
+ "它的作用是……额……就是不起作用": "它的作用是......额......就是不起作用",
608
+ "通过裁剪来缩短历史记录的长度": "通過裁剪來縮短歷史記錄的長度",
609
+ "chatGPT对话历史": "chatGPT對話歷史",
610
+ "它可以作为创建新功能函数的模板": "它可以作為創建新功能函數的模板",
611
+ "生成一个请求线程": "生成一個請求線程",
612
+ "$m$是质量": "$m$是質量",
613
+ ";4、引用数量": ";4、引用數量",
614
+ "NewBing响应缓慢": "NewBing響應緩慢",
615
+ "提交": "提交",
616
+ "test_联网回答问题": "test_聯網回答問題",
617
+ "加载tokenizer完毕": "加載tokenizer完畢",
618
+ "HotReload 的意思是热更新": "HotReload 的意思是熱更新",
619
+ "随便显示点什么防止卡顿的感觉": "隨便顯示點什麼防止卡頓的感覺",
620
+ "对整个Markdown项目进行翻译": "對整個Markdown項目進行翻譯",
621
+ "替换操作": "替換操作",
622
+ "然后通过getattr函数获取函数名": "然後通過getattr函數獲取函數名",
623
+ "并替换为空字符串": "並替換為空字符串",
624
+ "逐个文件分析已完成": "逐個文件分析已完成",
625
+ "填写之前不要忘记把USE_PROXY改成True": "填寫之前不要忘記把USE_PROXY改成True",
626
+ "不要遗漏括号": "不要遺漏括號",
627
+ "避免包括解释": "避免包括解釋",
628
+ "把newbing的长长的cookie放到这里": "把newbing的長長的cookie放到這裡",
629
+ "如API和代理网址": "如API和代理網址",
630
+ "模块预热": "模塊預熱",
631
+ "Latex项目全文英译中": "Latex項目全文英譯中",
632
+ "尝试计算比例": "嘗試計算比例",
633
+ "OpenAI所允許的最大並行過載": "OpenAI所允許的最大並行過載",
634
+ "向chatbot中添加簡單的意外錯誤信息": "向chatbot中添加簡單的意外錯誤信息",
635
+ "history至少釋放二分之一": "history至少釋放二分之一",
636
+ "”補上": "”補上",
637
+ "我們剝離Introduction之後的部分": "我們剝離Introduction之後的部分",
638
+ "嘗試加載": "嘗試加載",
639
+ "**函數功能**": "**函數功能**",
640
+ "藍色": "藍色",
641
+ "重置文件的創建時間": "重置文件的創建時間",
642
+ "再失敗就沒辦法了": "再失敗就沒辦法了",
643
+ "解析整個Python項目": "解析整個Python項目",
644
+ "此處不修改": "此處不修改",
645
+ "安裝ChatGLM的依賴": "安裝ChatGLM的依賴",
646
+ "使用wraps": "使用wraps",
647
+ "優先級1. 獲取環境變量作為配置": "優先級1. 獲取環境變量作為配置",
648
+ "遞歸地切割PDF文件": "遞歸地切割PDF文件",
649
+ "隨變按鈕的回調函數註冊": "隨變按鈕的回調函數註冊",
650
+ "我們": "我們",
651
+ "然後請使用Markdown格式封裝": "然後請使用Markdown格式封裝",
652
+ "網絡的遠程文件": "網絡的遠程文件",
653
+ "主进程统一调用函数接口": "主進程統一調用函數介面",
654
+ "请按以下描述给我发送图片": "請按以下描述給我發送圖片",
655
+ "正常对话时使用": "正常對話時使用",
656
+ "不需要高级参数": "不需要高級參數",
657
+ "双换行": "雙換行",
658
+ "初始值是摘要": "初始值是摘要",
659
+ "已经对该文章的所有片段总结完毕": "已經對該文章的所有片段總結完畢",
660
+ "proxies格式错误": "proxies格式錯誤",
661
+ "一次性完成": "一次性完成",
662
+ "设置一个token上限": "設置一個token上限",
663
+ "接下来": "接下來",
664
+ "以_array结尾的输入变量都是列表": "以_array結尾的輸入變量都是列表",
665
+ "收到以下文件": "收到以下文件",
666
+ "但显示Token不足": "但顯示Token不足",
667
+ "可以多线程并行": "可以多線程並行",
668
+ "带Cookies的Chatbot类": "帶Cookies的Chatbot類",
669
+ "空空如也的输入栏": "空空如也的輸入欄",
670
+ "然后回车键提交后即可生效": "然後回車鍵提交後即可生效",
671
+ "这是必应": "這是必應",
672
+ "聊天显示框的句柄": "聊天顯示框的句柄",
673
+ "集合文件": "集合文件",
674
+ "并显示到聊天当中": "並顯示到聊天當中",
675
+ "设置5秒即可": "設��5秒即可",
676
+ "不懂就填localhost或者127.0.0.1肯定错不了": "不懂就填localhost或者127.0.0.1肯定錯不了",
677
+ "安装方法": "安裝方法",
678
+ "Openai 限制免费用户每分钟20次请求": "Openai 限制免費用戶每分鐘20次請求",
679
+ "建议": "建議",
680
+ "将普通文本转换为Markdown格式的文本": "將普通文本轉換為Markdown格式的文本",
681
+ "应急食品是“原神”游戏中的角色派蒙的外号": "應急食品是“原神”遊戲中的角色派蒙的外號",
682
+ "不要修改!!": "不要修改!!",
683
+ "注意无论是inputs还是history": "注意無論是inputs還是history",
684
+ "读取Latex文件": "讀取Latex文件",
685
+ "\\n 翻译": "\\n 翻譯",
686
+ "第 1 步": "第 1 步",
687
+ "代理配置": "代理配置",
688
+ "temperature是LLM的内部调优参数": "temperature是LLM的內部調優參數",
689
+ "解析整个Lua项目": "解析整個Lua項目",
690
+ "重试几次": "重試幾次",
691
+ "接管gradio默认的markdown处理方式": "接管gradio默認的markdown處理方式",
692
+ "请注意自我隐私保护哦!": "請注意自我隱私保護哦!",
693
+ "导入软件依赖失败": "導入軟件依賴失敗",
694
+ "方便调试和定位问题": "方便調試和定位問題",
695
+ "请用代码块输出代码": "請用代碼塊輸出代碼",
696
+ "字符数小于100": "字符數小於100",
697
+ "程序终止": "程序終止",
698
+ "处理历史信息": "處理歷史信息",
699
+ "在界面上显示结果": "在界面上顯示結果",
700
+ "自动定位": "自動定位",
701
+ "读Tex论文写摘要": "讀Tex論文寫摘要",
702
+ "截断时的颗粒度": "截斷時的顆粒度",
703
+ "第 4 步": "第 4 步",
704
+ "正在处理中": "正在處理中",
705
+ "酸橙色": "酸橙色",
706
+ "分别为 __enter__": "分別為 __enter__",
707
+ "Json异常": "Json異常",
708
+ "输入过长已放弃": "輸入過長已放棄",
709
+ "按照章节切割PDF": "按照章節切割PDF",
710
+ "作为切分点": "作為切分點",
711
+ "用一句话概括程序的整体功能": "用一句話概括程序的整體功能",
712
+ "PDF文件也已经下载": "PDF文件也已經下載",
713
+ "您可能选择了错误的模型或请求源": "您可能選擇了錯誤的模型或請求源",
714
+ "则终止": "則終止",
715
+ "完成了吗": "完成了嗎",
716
+ "表示要搜索的文件类型": "表示要搜索的文件類型",
717
+ "文件内容是": "文件內容是",
718
+ "亮色主题": "亮色主題",
719
+ "函数插件输入输出接驳区": "函數插件輸入輸出接驳區",
720
+ "异步任务开始": "異步任務開始",
721
+ "Index 2 框框": "索引 2 框框",
722
+ "方便实现复杂的功能逻辑": "方便實現複雜的功能邏輯",
723
+ "警告": "警告",
724
+ "放在这里": "放在這裡",
725
+ "处理中途中止的情况": "處理中途中止的情況",
726
+ "结尾除去一次": "結尾除去一次",
727
+ "代码开源和更新": "代碼開源和更新",
728
+ "列表": "列表",
729
+ "状态": "狀態",
730
+ "第9步": "第9步",
731
+ "的标识": "的標識",
732
+ "Call jittorllms fail 不能正常加载jittorllms的参数": "Call jittorllms 失敗 不能正常加載 jittorllms 的參數",
733
+ "中性色": "中性色",
734
+ "优先": "優先",
735
+ "读取配置": "讀取配置",
736
+ "jittorllms消耗大量的内存": "jittorllms消耗大量的內存",
737
+ "Latex项目全文中译英": "Latex項目全文中譯英",
738
+ "在代理软件的设置里找": "在代理軟件的設置裡找",
739
+ "否则将导致每个人的NewBing问询历史互相渗透": "否則將導致每個人的NewBing問詢歷史互相滲透",
740
+ "这个函数运行在子进程": "這個函數運行在子進程",
741
+ "2. 长效解决方案": "2. 長效解決方案",
742
+ "Windows上还需要安装winrar软件": "Windows上還需要安裝winrar軟件",
743
+ "正在执行一些模块的预热": "正在執行一些模塊的預熱",
744
+ "一键DownloadArxivPapersAndTranslateAbstract": "一鍵DownloadArxivPapersAndTranslateAbstract",
745
+ "完成全部响应": "完成全部響應",
746
+ "输入中可能存在乱码": "輸入中可能存在亂碼",
747
+ "用了很多trick": "用了很多trick",
748
+ "填写格式是": "填寫格式是",
749
+ "预处理一波": "預處理一波",
750
+ "如果只询问1个大语言模型": "如果只詢問1個大語言模型",
751
+ "第二部分": "第二部分",
752
+ "或历史数据过长. 历史缓存数据已部分释放": "或歷史數據過長. 歷史緩存數據已部分釋放",
753
+ "文章内容是": "文章內容是",
754
+ "二、论文翻译": "二、論文翻譯",
755
+ "汇总报告已经添加到右侧“文件上传区”": "匯總報告已經添加到右側“檔案上傳區”",
756
+ "图像中转网址": "圖像中轉網址",
757
+ "第4次尝试": "第4次嘗試",
758
+ "越新越好": "越新越好",
759
+ "解决一个mdx_math的bug": "解決一個mdx_math的bug",
760
+ "中间过程不予显示": "中間過程不予顯示",
761
+ "路径或网址": "路徑或網址",
762
+ "您可以试试让AI写一个Related Works": "您可以試試讓AI寫一個Related Works",
763
+ "开始接收chatglm的回复": "開始接收chatglm的回覆",
764
+ "环境变量可以是": "環境變數可以是",
765
+ "请将此部分润色以满足学术标准": "請將此部分潤色以滿足學術標準",
766
+ "* 此函数未来将被弃用": "* 此函數未來將被棄用",
767
+ "替换其他特殊字符": "替換其他特殊字元",
768
+ "该模板可以实现ChatGPT联网信息综合": "該模板可以實現ChatGPT聯網資訊綜合",
769
+ "当前问答": "當前問答",
770
+ "洋红色": "洋紅色",
771
+ "不需要重启程序": "不需要重啟程式",
772
+ "所有线程同时开始执行任务函数": "所有線程同時開始執行任務函數",
773
+ "因此把prompt加入 history": "因此將prompt加入歷史",
774
+ "刷新界面": "重新整理介面",
775
+ "青色": "藍綠色",
776
+ "实时在UI上反馈远程数据流": "即時在UI上回饋遠程數據流",
777
+ "第一种情况": "第一種情況",
778
+ "的耐心": "的耐心",
779
+ "提取所有块元的文本信息": "提取所有塊元的文本信息",
780
+ "裁剪时": "裁剪時",
781
+ "对从 PDF 提取出的原始文本进行清洗和格式化处理": "對從PDF提取出的原始文本進行清洗和格式化處理",
782
+ "如果是第一次运行": "如果是第一次運行",
783
+ "程序完成": "程式完成",
784
+ "api-key不满足要求": "API金鑰不滿足要求",
785
+ "布尔值": "布林值",
786
+ "尝试导入依赖": "嘗試匯入相依性",
787
+ "逐个文件分析": "逐個檔案分析",
788
+ "详情见get_full_error的输出": "詳情見get_full_error的輸出",
789
+ "检测到": "偵測到",
790
+ "手动指定和筛选源代码文件类型": "手動指定和篩選原始程式碼檔案類型",
791
+ "进入任务等待状态": "進入任務等待狀態",
792
+ "当 输入部分的token占比 小于 全文的一半时": "當輸入部分的token佔比小於全文的一半時",
793
+ "查询代理的地理位置": "查詢代理的地理位置",
794
+ "是否在输入过长时": "是否在輸入過長時",
795
+ "chatGPT分析报告": "chatGPT分析報告",
796
+ "然后yeild出去": "然後yield出去",
797
+ "用户取消了程序": "使用者取消了程式",
798
+ "琥珀色": "琥珀色",
799
+ "这里是特殊函数插件的高级参数输入区": "這裡是特殊函數插件的高級參數輸入區",
800
+ "第 2 步": "第 2 步",
801
+ "字符串": "字串",
802
+ "检测到程序终止": "偵測到程式終止",
803
+ "对整个Latex项目进行润色": "對整個Latex專案進行潤色",
804
+ "方法则会被调用": "方法則會被調用",
805
+ "实验性函数调用出错": "實驗性函數調用出錯",
806
+ "把完整输入-输出结果显示在聊天框": "把完整輸入-輸出結果顯示在聊天框",
807
+ "本地文件预览": "本地檔案預覽",
808
+ "接下来请你逐文件分析下面的论文文件": "接下來請你逐檔案分析下面的論文檔案",
809
+ "英语关键词": "英語關鍵詞",
810
+ "一-鿿": "一-鿿",
811
+ "尝试识别section": "嘗試識別section",
812
+ "用于显示给用户": "用於顯示給使用者",
813
+ "newbing回复的片段": "newbing回覆的片段",
814
+ "的转化": "的轉換",
815
+ "将要忽略匹配的文件名": "將要忽略匹配的檔案名稱",
816
+ "生成正则表达式": "生成正則表示式",
817
+ "失败时的重试次数": "失敗時的重試次數",
818
+ "亲人两行泪": "親人兩行淚",
819
+ "故可以只分析文章内容": "故可以只分析文章內容",
820
+ "然后回车提交": "然後按下Enter提交",
821
+ "并提供改进建议": "並提供改進建議",
822
+ "不可多线程": "不可多執行緒",
823
+ "这个文件用于函数插件的单元测试": "這個檔案用於函數插件的單元測試",
824
+ "用一张Markdown表格简要描述以下文件的功能": "用一張Markdown表格簡要描述以下檔案的功能",
825
+ "可用clear将其清空": "可用clear將其清空",
826
+ "发送至LLM": "發送至LLM",
827
+ "先在input输入编号": "先在input輸入編號",
828
+ "更新失败": "更新失敗",
829
+ "相关功能不稳定": "相關功能不穩定",
830
+ "自动解压": "自動解壓",
831
+ "效果奇好": "效果奇佳",
832
+ "拆分过长的IPynb文件": "拆分過長的IPynb檔案",
833
+ "份搜索结果": "搜尋結果",
834
+ "如果没有指定文件名": "如果沒有指定檔案名稱",
835
+ "有$标识的公式符号": "有$標識的公式符號",
836
+ "跨平台": "跨平台",
837
+ "最终": "最終",
838
+ "第3次尝试": "第三次嘗試",
839
+ "检查代理服务器是否可用": "檢查代理伺服器是否可用",
840
+ "再例如一个包含了待处理文件的路径": "再例如一個包含了待處理檔案的路徑",
841
+ "注意文章中的每一句话都要翻译": "注意文章中的每一句話都要翻譯",
842
+ "修改它": "修改它",
843
+ "发送 GET 请求": "發送 GET 請求",
844
+ "判定为不是正文": "判定為不是正文",
845
+ "默认是.md": "預設是.md",
846
+ "终止按钮的回调函数注册": "終止按鈕的回調函數註冊",
847
+ "搜索需要处理的文件清单": "搜尋需要處理的檔案清單",
848
+ "当历史上下文过长时": "當歷史上下文過長時",
849
+ "不包含任何可用于": "不包含任何可用於",
850
+ "本项目现已支持OpenAI和API2D的api-key": "本專案現已支援OpenAI和API2D的api-key",
851
+ "异常原因": "異常原因",
852
+ "additional_fn代表点击的哪个按钮": "additional_fn代表點擊的哪個按鈕",
853
+ "注意": "注意",
854
+ "找不到任何.docx或doc文件": "找不到任何.docx或doc文件",
855
+ "刷新用户界面": "刷新使用者介面",
856
+ "失败": "失敗",
857
+ "Index 0 文本": "索引 0 文本",
858
+ "你需要翻译以下内容": "你需要翻譯以下內容",
859
+ "chatglm 没有 sys_prompt 接口": "chatglm 沒有 sys_prompt 介面",
860
+ "您的 API_KEY 是": "您的 API_KEY 是",
861
+ "请缩减输入文件的数量": "請減少輸入檔案的數量",
862
+ "并且将结合上下文内容": "並且將結合上下文內容",
863
+ "返回当前系统中可用的未使用端口": "返回目前系統中可用的未使用埠口",
864
+ "以下配置可以优化体验": "以下配置可以優化體驗",
865
+ "常规情况下": "一般情況下",
866
+ "递归": "遞迴",
867
+ "分解代码文件": "分解程式碼檔案",
868
+ "用户反馈": "使用者回饋",
869
+ "第 0 步": "第 0 步",
870
+ "即将更新pip包依赖……": "即將更新pip套件相依性......",
871
+ "请从": "請從",
872
+ "第二种情况": "第二種情況",
873
+ "NEWBING_COOKIES未填寫或有格式錯誤": "NEWBING_COOKIES未填寫或格式錯誤",
874
+ "以上材料已經被寫入": "以上材料已經被寫入",
875
+ "找圖片": "尋找圖片",
876
+ "函數插件-固定按鈕區": "函數插件-固定按鈕區",
877
+ "該文件中主要包含三個函數": "該文件主要包含三個函數",
878
+ "用於與with語句一起使用": "用於與with語句一起使用",
879
+ "插件初始化中": "插件初始化中",
880
+ "文件讀取完成": "文件讀取完成",
881
+ "讀取文件": "讀取文件",
882
+ "高危設置!通過修改此設置": "高危設置!通過修改此設置",
883
+ "所有文件都總結完成了嗎": "所有文件都總結完成了嗎",
884
+ "限制的3/4時": "限制的3/4時",
885
+ "取決於": "取決於",
886
+ "預處理": "預處理",
887
+ "至少一個線程任務Token溢出而失敗": "至少一個線程任務Token溢出而失敗",
888
+ "一、論文概況": "一、論文概況",
889
+ "TGUI不支持函數插件的實現": "TGUI不支持函數插件的實現",
890
+ "拒絕服務": "拒絕服務",
891
+ "請更換為API_URL_REDIRECT配置": "請更換為API_URL_REDIRECT配置",
892
+ "是否自動處理token溢出的情況": "是否自動處理token溢出的情況",
893
+ "和": "和",
894
+ "双层列表": "雙層列表",
895
+ "做一些外观色彩上的调整": "做一些外觀色彩上的調整",
896
+ "发送请求到子进程": "發送請求到子進程",
897
+ "配置信息如下": "配置信息如下",
898
+ "从而实现分批次处理": "從而實現分批次處理",
899
+ "找不到任何.ipynb文件": "找不到任何.ipynb文件",
900
+ "代理网络的地址": "代理網絡的地址",
901
+ "新版本": "新版本",
902
+ "用于实现Python函数插件的热更新": "用於實現Python函數插件的熱更新",
903
+ "将中文句号": "將中文句號",
904
+ "警告!被保存的对话历史可以被使用该系统的任何人查阅": "警告!被保存的對話歷史可以被使用該系統的任何人查閱",
905
+ "用于数据流可视化": "用於數據流可視化",
906
+ "第三部分": "第三部分",
907
+ "界面更新": "界面更新",
908
+ "**输出参数说明**": "**輸出參數說明**",
909
+ "其中$E$是能量": "其中$E$是能量",
910
+ "这个内部函数可以将函数的原始定义更新为最新版本": "這個內部函數可以將函數的原始定義更新為最新版本",
911
+ "不要修改任何LaTeX命令": "不要修改任何LaTeX命令",
912
+ "英译中": "英譯中",
913
+ "将错误显示出来": "顯示錯誤",
914
+ "*代表通配符": "*代表通配符",
915
+ "找不到任何lua文件": "找不到任何lua文件",
916
+ "准备文件的下载": "準備下載文件",
917
+ "爬取搜索引擎的结果": "爬取搜尋引擎的結果",
918
+ "例如在windows cmd中": "例如在windows cmd中",
919
+ "一般原样传递下去就行": "一般原樣傳遞下去就行",
920
+ "免费用户填3": "免費用戶填3",
921
+ "在汇总报告中隐藏啰嗦的真实输入": "在匯總報告中隱藏啰嗦的真實輸入",
922
+ "Tiktoken未知错误": "Tiktoken未知錯誤",
923
+ "整理结果": "整理結果",
924
+ "也许等待十几秒后": "也許等待十幾秒後",
925
+ "将匹配到的数字作为替换值": "將匹配到的數字作為替換值",
926
+ "对每一个源代码文件": "對每一個源代碼文件",
927
+ "补上后面的": "補上後面的",
928
+ "调用时": "調用時",
929
+ "也支持同时填写多个api-key": "也支持同時填寫多個api-key",
930
+ "第二层列表是对话历史": "第二層列表是對話歷史",
931
+ "询问多个GPT模型": "詢問多個GPT模型",
932
+ "您可能需要手动安装新增的依赖库": "您可能需要手動安裝新增的依賴庫",
933
+ "隨機負載均衡": "隨機負載均衡",
934
+ "等待多線程操作": "等待多線程操作",
935
+ "質能方程式": "質能方程式",
936
+ "需要預先pip install py7zr": "需要預先pip install py7zr",
937
+ "是否丟棄掉 不是正文的內容": "是否丟棄掉 不是正文的內容",
938
+ "加載失敗!": "加載失敗!",
939
+ "然後再寫一段英文摘要": "然後再寫一段英文摘要",
940
+ "從以上搜索結果中抽取信息": "從以上搜索結果中抽取信息",
941
+ "response中會攜帶traceback報錯信息": "response中會攜帶traceback報錯信息",
942
+ "放到history中": "放到history中",
943
+ "不能正常加載jittorllms的參數!": "不能正常加載jittorllms的參數!",
944
+ "需要預先pip install rarfile": "需要預先pip install rarfile",
945
+ "以免輸入溢出": "以免輸入溢出",
946
+ "MOSS消耗大量的內存": "MOSS消耗大量的內存",
947
+ "獲取預處理函數": "獲取預處理函數",
948
+ "缺少MOSS的依賴": "缺少MOSS的依賴",
949
+ "多線程": "多線程",
950
+ "結束": "結束",
951
+ "請使用Markdown": "請使用Markdown",
952
+ "匹配^數字^": "匹配^數字^",
953
+ "负责把学术论文准确翻译成中文": "負責將學術論文準確翻譯成中文",
954
+ "否则可能导致显存溢出而造成卡顿": "否則可能導致顯存溢出而造成卡頓",
955
+ "不输入即全部匹配": "不輸入即全部匹配",
956
+ "下面是一些学术文献的数据": "下面是一些學術文獻的數據",
957
+ "网络卡顿、代理失败、KEY失效": "網絡卡頓、代理失敗、KEY失效",
958
+ "其他的排队等待": "其他的排隊等待",
959
+ "表示要搜索的文件或者文件夹路径或网络上的文件": "表示要搜索的文件或者文件夾路徑或網絡上的文件",
960
+ "当输入部分的token占比": "當輸入部分的token佔比",
961
+ "你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性": "你的任務是改進所提供文本的拼寫、語法、清晰、簡潔和整體可讀性",
962
+ "这是什么功能": "這是什麼功能",
963
+ "剩下的情况都开头除去": "剩下的情況都開頭除去",
964
+ "清除换行符": "清除換行符",
965
+ "请提取": "請提取",
966
+ "覆盖和重启": "覆蓋和重啟",
967
+ "发送至chatGPT": "發送至chatGPT",
968
+ "+ 已经汇总的文件组": "+ 已經匯總的文件組",
969
+ "插件": "插件",
970
+ "OpenAI模型选择是": "OpenAI模型選擇是",
971
+ "原文": "原文",
972
+ "您可以随时在history子文件夹下找回旧版的程序": "您可以隨時在history子文件夾下找回舊版的程序",
973
+ "以确保一些资源在代码块执行期间得到正确的初始化和清理": "以確保一些資源在程式碼區塊執行期間得到正確的初始化和清理",
974
+ "它们会继续向下调用更底层的LLM模型": "它們會繼續向下調用更底層的LLM模型",
975
+ "GPT输出格式错误": "GPT輸出格式錯誤",
976
+ "中译英": "中譯英",
977
+ "无代理状态下很可能无法访问OpenAI家族的模型": "無代理狀態下很可能無法訪問OpenAI家族的模型",
978
+ "已失败": "已失敗",
979
+ "最大线程数": "最大線程數",
980
+ "读取时首先看是否存在私密的config_private配置文件": "讀取時首先看是否存在私密的config_private配置文件",
981
+ "必要时": "必要時",
982
+ "在装饰器内部": "在裝飾器內部",
983
+ "api2d 正常完成": "api2d 正常完成",
984
+ "您可以调用“LoadConversationHistoryArchive”还原当下的对话": "您可以調用“LoadConversationHistoryArchive”還原當下的對話",
985
+ "找不到任何golang文件": "找不到任何golang文件",
986
+ "找不到任何rust文件": "找不到任何rust文件",
987
+ "输入了已经经过转化的字符串": "輸入了已經經過轉換的字串",
988
+ "是否在结束时": "是否在結束時",
989
+ "存档文件详情": "存檔文件詳情",
990
+ "用英文逗号分割": "用英文逗號分割",
991
+ "已删除": "已刪除",
992
+ "收到消息": "收到訊息",
993
+ "系统输入": "系統輸入",
994
+ "读取配置文件": "讀取配置檔",
995
+ "跨线程传递": "跨線程傳遞",
996
+ "Index 1 字体": "索引 1 字型",
997
+ "设定一个最小段落长度阈值": "設定最小段落長度閾值",
998
+ "流式获取输出": "流式取得輸出",
999
+ "默认按钮颜色是 secondary": "預設按鈕顏色為 secondary",
1000
+ "请对下面的程序文件做一个概述": "請對下面的程式檔案做一個概述",
1001
+ "当文件被上传时的回调函数": "當檔案被上傳時的回撥函數",
1002
+ "对话窗的高度": "對話窗的高度",
1003
+ "Github更新地址": "Github更新位址",
1004
+ "然后在用常规的": "然後再用常規的",
1005
+ "读取Markdown文件": "讀取Markdown檔案",
1006
+ "会把列表拆解": "會拆解列表",
1007
+ "OpenAI绑定信用卡可解除频率限制": "OpenAI綁定信用卡可解除頻率限制",
1008
+ "可能需要一点时间下载参数": "可能需要一點時間下載參數",
1009
+ "需要访问谷歌": "需要訪問谷歌",
1010
+ "根据给定的匹配结果来判断换行符是否表示段落分隔": "根據給定的匹配結果來判斷換行符是否表示段落分隔",
1011
+ "请提交新问题": "請提交新問題",
1012
+ "测试功能": "測試功能",
1013
+ "尚未充分测试的函数插件": "尚未充分測試的函數插件",
1014
+ "解析此项目本身": "解析此專案本身",
1015
+ "提取摘要": "提取摘要",
1016
+ "用于输入给GPT的前提提示": "用於輸入給GPT的前提提示",
1017
+ "第一步": "第一步",
1018
+ "此外": "此外",
1019
+ "找不到任何前端相关文件": "找不到任何前端相關檔案",
1020
+ "输入其他/无输入+回车=不更新": "輸入其他/無輸入+回車=不更新",
1021
+ "句号": "句號",
1022
+ "如果最后成功了": "如果最後成功了",
1023
+ "导致输出不完整": "導致輸出不完整",
1024
+ "并修改代码拆分file_manifest列表": "並修改程式碼拆分file_manifest列表",
1025
+ "在读取API_KEY时": "在讀取API_KEY時",
1026
+ "迭代地历遍整个文章": "迭代地歷遍整個文章",
1027
+ "存在一行极长的文本!": "存在一行極長的文字!",
1028
+ "private_upload里面的文件名在解压zip后容易出现乱码": "private_upload裡面的檔案名在解壓縮zip後容易出現亂碼",
1029
+ "清除当前溢出的输入": "清除當前溢出的輸入",
1030
+ "只输出转化后的英文代码": "只輸出轉換後的英文程式碼",
1031
+ "打开插件列表": "打開外掛程式列表",
1032
+ "查询版本和用户意见": "查詢版本和使用者意見",
1033
+ "需要用此选项防止高频地请求openai导致错误": "需要用此選項防止高頻地請求openai導致錯誤",
1034
+ "有肉眼不可见的小变化": "有肉眼不可見的小變化",
1035
+ "返回一个新的字符串": "返回一個新的字串",
1036
+ "如果是.doc文件": "如果是.doc文件",
1037
+ "英语学术润色": "英語學術潤色",
1038
+ "已经全部完成": "已經全部完成",
1039
+ "该文件中主要包含2个函数": "該文件中主要包含2個函數",
1040
+ "捕捉函数f中的异常并封装到一个生成器中返回": "捕捉函數f中的異常並封裝到一個生成器中返回",
1041
+ "兼容旧版的配置": "兼容舊版的配置",
1042
+ "LLM的内部调优参数": "LLM的內部調優參數",
1043
+ "请查收": "請查收",
1044
+ "输出了前面的": "輸出了前面的",
1045
+ "用多种方式组合": "用多種方式組合",
1046
+ "等待中": "等待中",
1047
+ "从最长的条目开始裁剪": "從最長的條目開始裁剪",
1048
+ "就是临时文件夹的路径": "就是臨時文件夾的路徑",
1049
+ "体验gpt-4可以试试api2d": "體驗gpt-4可以試試api2d",
1050
+ "提交任务": "提交任務",
1051
+ "已配置": "已配置",
1052
+ "第三方库": "第三方庫",
1053
+ "将y中最后一项的输入部分段落化": "將y中最後一項的輸入部分段落化",
1054
+ "高级函数插件": "Advanced Function Plugin",
1055
+ "等待jittorllms响应中": "Waiting for jittorllms response",
1056
+ "解析整个C++项目": "Parsing the entire C++ project",
1057
+ "你是一名专业的学术教授": "You are a professional academic professor",
1058
+ "截断重试": "Truncated retry",
1059
+ "即在代码结构不变得情况下取代其他的上下文管理器": "That is, replace other context managers without changing the code structure",
1060
+ "表示函数是否成功执行": "Indicates whether the function was executed successfully",
1061
+ "处理多模型并行等细节": "Handling details such as parallelism of multiple models",
1062
+ "不显示中间过程": "Do not display intermediate process",
1063
+ "chatGPT的内部调优参数": "Internal tuning parameters of chatGPT",
1064
+ "你必须使用Markdown表格": "You must use Markdown tables",
1065
+ "第 5 步": "Step 5",
1066
+ "jittorllms响应异常": "jittorllms response exception",
1067
+ "在项目根目录运行这两个指令": "Run these two commands in the project root directory",
1068
+ "获取tokenizer": "Get tokenizer",
1069
+ "chatbot 为WebUI中显示的对话列表": "chatbot is the list of dialogues displayed in WebUI",
1070
+ "test_解析一个Cpp项目": "test_parse a Cpp project",
1071
+ "将对话记录history以Markdown格式写入文件中": "Write the dialogue record history to a file in Markdown format",
1072
+ "装饰器函数": "Decorator function",
1073
+ "玫瑰色": "Rose color",
1074
+ "将单空行": "刪除單行空白",
1075
+ "祖母绿": "綠松石色",
1076
+ "整合所有信息": "整合所有資訊",
1077
+ "如温度和top_p等": "例如溫度和top_p等",
1078
+ "重试中": "重試中",
1079
+ "月": "月份",
1080
+ "localhost意思是代理软件安装在本机上": "localhost意思是代理軟體安裝在本機上",
1081
+ "gpt_log/**/chatGPT对话历史*.html": "gpt_log/**/chatGPT對話歷史*.html",
1082
+ "的长度必须小于 2500 个 Token": "長度必須小於 2500 個 Token",
1083
+ "抽取可用的api-key": "提取可用的api-key",
1084
+ "增强报告的可读性": "增強報告的可讀性",
1085
+ "对话历史": "對話歷史",
1086
+ "-1代表随机端口": "-1代表隨機端口",
1087
+ "在函数插件中被调用": "在函數插件中被調用",
1088
+ "向chatbot中添加错误信息": "向chatbot中添加錯誤訊息",
1089
+ "代理可能无效": "代理可能無效",
1090
+ "比如introduction": "例如introduction",
1091
+ "接下来请你逐文件分析下面的工程": "接下來請你逐文件分析下面的工程",
1092
+ "任务函数": "任務函數",
1093
+ "删除所有历史对话文件": "刪除所有歷史對話檔案",
1094
+ "找不到任何.md文件": "找不到任何.md文件",
1095
+ "给出输出文件清单": "給出輸出文件清單",
1096
+ "不能正常加载ChatGLM的参数!": "無法正常加載ChatGLM的參數!",
1097
+ "不详": "不詳",
1098
+ "提取出以下内容": "提取出以下內容",
1099
+ "请注意": "請注意",
1100
+ "不能加载Newbing组件": "無法加載Newbing組件",
1101
+ "您既可以在config.py中修改api-key": "您可以在config.py中修改api-key",
1102
+ "但推荐上传压缩文件": "但建議上傳壓縮文件",
1103
+ "支持任意数量的llm接口": "支持任意數量的llm接口",
1104
+ "材料如下": "材料如下",
1105
+ "停止": "停止",
1106
+ "gradio的inbrowser触发不太稳定": "gradio的inbrowser觸發不太穩定",
1107
+ "带token约简功能": "帶token約簡功能",
1108
+ "解析项目": "解析項目",
1109
+ "尝试识别段落": "嘗試識別段落",
1110
+ "输入栏用户输入的文本": "輸入欄用戶輸入的文本",
1111
+ "清理规则包括": "清理規則包括",
1112
+ "新版配置": "新版配置",
1113
+ "如果有": "如果有",
1114
+ "高級參數輸入區": "#",
1115
+ "您提供的api-key不滿足要求": "#",
1116
+ "“喂狗”": "#",
1117
+ "有線程鎖": "#",
1118
+ "解析整個CSharp項目": "#",
1119
+ "上下文管理器必須實現兩個方法": "#",
1120
+ "Call MOSS fail 不能正常加載MOSS的參數": "#",
1121
+ "獲取圖片URL": "#",
1122
+ "輸入部分太自由": "#",
1123
+ "Not enough point. API2D賬戶點數不足": "#",
1124
+ "網絡錯誤": "#",
1125
+ "請開始多線程操作": "#",
1126
+ "authors獲取失敗": "#",
1127
+ "、地址": "#",
1128
+ "根據以上分析": "#",
1129
+ "1、英文題目;2、中文題目翻譯;3、作者;4、arxiv公開": "#",
1130
+ "一些普通功能模塊": "#",
1131
+ "參數簡單": "#",
1132
+ "具備以下功能": "#",
1133
+ "優先級2. 獲取config_private中的配置": "#",
1134
+ "汇总报告如何远程获取": "如何遠程獲取匯總報告",
1135
+ "热更新prompt": "熱更新提示",
1136
+ "插件调度异常": "插件調度異常",
1137
+ "英文Latex项目全文润色": "英文Latex項目全文潤色",
1138
+ "此外我们也提供可同步处理大量文件的多线程Demo供您参考": "此外我們也提供可同步處理大量文件的多線程Demo供您參考",
1139
+ "则不解析notebook中的Markdown块": "則不解析notebook中的Markdown塊",
1140
+ "备选输入区": "備選輸入區",
1141
+ "个片段": "個片段",
1142
+ "总结输出": "總結輸出",
1143
+ "2. 把输出用的余量留出来": "2. 把輸出用的餘量留出來",
1144
+ "请对下面的文章片段做一个概述": "請對下面的文章片段做一個概述",
1145
+ "多线程方法": "多線程方法",
1146
+ "下面是对每个参数和返回值的说明": "下面是對每個參數和返回值的說明",
1147
+ "由于请求gpt需要一段时间": "由於請求gpt需要一段時間",
1148
+ "历史": "歷史",
1149
+ "用空格或段落分隔符替换原换行符": "用空格或段落分隔符替換原換行符",
1150
+ "查找语法错误": "查找語法錯誤",
1151
+ "输出 Returns": "輸出 Returns",
1152
+ "在config.py中配置": "在config.py中配置",
1153
+ "找不到任何.tex文件": "找不到任何.tex文件",
1154
+ "一键更新协议": "一鍵更新協議",
1155
+ "gradio版本较旧": "gradio版本較舊",
1156
+ "灵活而简洁": "靈活而簡潔",
1157
+ "等待NewBing响应中": "等待NewBing響應中",
1158
+ "更多函数插件": "更多函數插件",
1159
+ "作为一个标识而存在": "作為一個標識而存在",
1160
+ "GPT模型返回的回复字符串": "GPT模型返回的回復字串",
1161
+ "请从给定的若干条搜索结果中抽取信息": "請從給定的若干條搜索結果中抽取信息",
1162
+ "请对下面的文章片段做概述": "請對下面的文章片段做概述",
1163
+ "历史对话输入": "歷史對話輸入",
1164
+ "请稍等": "請稍等",
1165
+ "整理报告的格式": "整理報告的格式",
1166
+ "保存当前的对话": "保存當前的對話",
1167
+ "代理所在地查询超时": "代理所在地查詢超時",
1168
+ "inputs 是本次问询的输入": "inputs是本次問詢的輸入",
1169
+ "网页的端口": "網頁的端口",
1170
+ "仅仅服务于视觉效果": "僅僅服務於視覺效果",
1171
+ "把结果写入文件": "把結果寫入文件",
1172
+ "留空即可": "留空即可",
1173
+ "按钮颜色": "按鈕顏色",
1174
+ "借鉴了 https": "借鉴了 https",
1175
+ "Token溢出数": "Token溢出數",
1176
+ "找不到任何java文件": "找��到任何java文件",
1177
+ "批量总结Word文档": "批量總結Word文檔",
1178
+ "一言以蔽之": "一言以蔽之",
1179
+ "提取字体大小是否近似相等": "提取字體大小是否近似相等",
1180
+ "直接给定文件": "直接給定文件",
1181
+ "使用该模块需要额外依赖": "使用該模塊需要額外依賴",
1182
+ "的配置": "的配置",
1183
+ "pip install python-docx 用于docx格式": "pip install python-docx 用於docx格式",
1184
+ "正在查找对话历史文件": "正在查找對話歷史文件",
1185
+ "输入已识别为openai的api_key": "輸入已識別為openai的api_key",
1186
+ "对整个Latex项目进行翻译": "對整個Latex項目進行翻譯",
1187
+ "Y+回车=确认": "Y+回車=確認",
1188
+ "正在同时咨询ChatGPT和ChatGLM……": "正在同時諮詢ChatGPT和ChatGLM……",
1189
+ "根据 heuristic 规则": "根據heuristic規則",
1190
+ "如256x256": "如256x256",
1191
+ "函数插件区": "函數插件區",
1192
+ "*** API_KEY 导入成功": "*** API_KEY 導入成功",
1193
+ "请对下面的程序文件做一个概述文件名是": "請對下面的程序文件做一個概述文件名是",
1194
+ "替換跨行的連詞": "#",
1195
+ "內容太長了都會觸發token數量溢出的錯誤": "#",
1196
+ "尚未完成全部響應": "#",
1197
+ "生成帶有段落標籤的HTML代碼": "#",
1198
+ "函數熱更新是指在不停止程序運行的情況下": "#",
1199
+ "將Unsplash API中的PUT_YOUR_QUERY_HERE替換成描述該事件的一個最重要的單詞": "#",
1200
+ "沒有提供高級參數功能說明": "#",
1201
+ "條": "#",
1202
+ "請刷新界面重試": "#",
1203
+ "和openai的連接容易斷掉": "#",
1204
+ "使用 Unsplash API": "#",
1205
+ "完成情況": "#",
1206
+ "迭代上一次的結果": "#",
1207
+ "每個線程都要“餵狗”": "#",
1208
+ "最多收納多少個網頁的結果": "#",
1209
+ "日": "#",
1210
+ "第4步": "#",
1211
+ "找不到任何python文件": "#",
1212
+ "經過充分測試": "#",
1213
+ "缺少的依賴": "#",
1214
+ "分组+迭代处理": "分組+迭代處理",
1215
+ "安装Newbing的依赖": "安裝Newbing的依賴",
1216
+ "批": "批",
1217
+ "代理与自动更新": "代理與自動更新",
1218
+ "读取pdf文件并清理其中的文本内容": "讀取pdf文件並清理其中的文本內容",
1219
+ "多线程Demo": "多線程Demo",
1220
+ "\\cite和方程式": "\\cite和方程式",
1221
+ "可能会导致严重卡顿": "可能會導致嚴重卡頓",
1222
+ "将Markdown格式的文本转换为HTML格式": "將Markdown格式的文本轉換為HTML格式",
1223
+ "建议您复制一个config_private.py放自己的秘密": "建議您複製一個config_private.py放自己的秘密",
1224
+ "质能方程可以写成$$E=mc^2$$": "質能方程可以寫成$$E=mc^2$$",
1225
+ "的文件": "的文件",
1226
+ "是本次问询的输入": "是本次問詢的輸入",
1227
+ "第三种情况": "第三種情況",
1228
+ "如果同时InquireMultipleLargeLanguageModels": "如果同時InquireMultipleLargeLanguageModels",
1229
+ "小于正文的": "小於正文的",
1230
+ "将输入和输出解析为HTML格式": "將輸入和輸出解析為HTML格式",
1231
+ "您正在调用一个": "您正在調用一個",
1232
+ "缺少jittorllms的依赖": "缺少jittorllms的依賴",
1233
+ "是否重置": "是否重置",
1234
+ "解析整个前端项目": "解析整個前端專案",
1235
+ "是否唤起高级插件参数区": "是否喚起高級插件參數區",
1236
+ "pip包依赖安装出现问题": "pip包依賴安裝出現問題",
1237
+ "请先转化为.docx格式": "請先轉換為.docx格式",
1238
+ "整理history": "整理歷史記錄",
1239
+ "缺少api_key": "缺少api_key",
1240
+ "拆分过长的latex文件": "拆分過長的latex文件",
1241
+ "使用markdown表格输出结果": "使用markdown表格輸出結果",
1242
+ "搜集初始信息": "搜集初始信息",
1243
+ "但还没输出完后面的": "但還沒輸出完後面的",
1244
+ "在上下文执行开始的情况下": "在上下文執行開始的情況下",
1245
+ "不要用代码块": "不要用代碼塊",
1246
+ "比如你是翻译官怎样怎样": "例如你是翻譯官怎樣怎樣",
1247
+ "装饰器函数返回内部函数": "裝飾器函數返回內部函數",
1248
+ "请你作为一个学术翻译": "請你作為一個學術翻譯",
1249
+ "清除重复的换行": "清除重複的換行",
1250
+ "换行 -": "換行 -",
1251
+ "你好": "你好",
1252
+ "触发重置": "觸發重置",
1253
+ "安装MOSS的依赖": "安裝MOSS的依賴",
1254
+ "首先你在英文語境下通讀整篇論文": "首先你在英文語境下通讀整篇論文",
1255
+ "需要清除首尾空格": "需要清除首尾空格",
1256
+ "多線程函數插件中": "多線程函數插件中",
1257
+ "分析用戶提供的谷歌學術": "分析用戶提供的谷歌學術",
1258
+ "基本信息": "基本信息",
1259
+ "python 版本建議3.9+": "python 版本建議3.9+",
1260
+ "開始請求": "開始請求",
1261
+ "不會實時顯示在界面上": "不會實時顯示在界面上",
1262
+ "接下來兩句話只顯示在界面上": "接下來兩句話只顯示在界面上",
1263
+ "根據當前的模型類別": "根據當前的模型類別",
1264
+ "10���文件為一組": "10個文件為一組",
1265
+ "第三組插件": "第三組插件",
1266
+ "此函數逐漸地搜索最長的條目進行剪輯": "此函數逐漸地搜索最長的條目進行剪輯",
1267
+ "拆分過長的Markdown文件": "拆分過長的Markdown文件",
1268
+ "最多同時執行5個": "最多同時執行5個",
1269
+ "裁剪input": "裁剪input",
1270
+ "現在您點擊任意“紅顏色”標識的函數插件時": "現在您點擊任意“紅顏色”標識的函數插件時",
1271
+ "且沒有代碼段": "且沒有代碼段",
1272
+ "建議低於1": "建議低於1",
1273
+ "並且對於網絡上的文件": "並且對於網絡上的文件",
1274
+ "文件代码是": "檔案代碼是",
1275
+ "我上传了文件": "我上傳了檔案",
1276
+ "年份获取失败": "年份獲取失敗",
1277
+ "解析网页内容": "解析網頁內容",
1278
+ "但内部用stream的方法避免中途网线被掐": "但內部使用stream的方法避免中途網路斷線",
1279
+ "这个函数用于分割pdf": "這個函數用於分割PDF",
1280
+ "概括其内容": "概括其內容",
1281
+ "请谨慎操作": "請謹慎操作",
1282
+ "更新UI": "更新使用者介面",
1283
+ "输出": "輸出",
1284
+ "请先从插件列表中选择": "請先從插件列表中選擇",
1285
+ "函数插件": "函數插件",
1286
+ "的方式启动": "的方式啟動",
1287
+ "否则在回复时会因余量太少出问题": "否則在回覆時會因餘量太少出問題",
1288
+ "并替换为回车符": "並替換為換行符號",
1289
+ "Newbing失败": "Newbing失敗",
1290
+ "找不到任何.h头文件": "找不到任何.h頭檔案",
1291
+ "执行时": "執行時",
1292
+ "不支持通过环境变量设置!": "不支持透過環境變數設置!",
1293
+ "获取完整的从Openai返回的报错": "獲取完整的從Openai返回的錯誤",
1294
+ "放弃": "放棄",
1295
+ "系统静默prompt": "系統靜默提示",
1296
+ "如果子任务非常多": "如果子任務非常多",
1297
+ "打印traceback": "列印追蹤信息",
1298
+ "前情提要": "前情提要",
1299
+ "请在config文件中修改API密钥之后再运行": "請在config文件中修改API密鑰之後再運行",
1300
+ "使用正则表达式查找注释": "使用正則表達式查找註釋",
1301
+ "这段代码定义了一个名为DummyWith的空上下文管理器": "這段代碼定義了一個名為DummyWith的空上下文管理器",
1302
+ "用学术性语言写一段中文摘要": "用學術性語言寫一段中文摘要",
1303
+ "优先级3. 获取config中的配置": "優先級3. 獲取config中的配置",
1304
+ "此key无效": "此key無效",
1305
+ "对话历史列表": "對話歷史列表",
1306
+ "循环轮询各个线程是否执行完毕": "循環輪詢各個線程是否執行完畢",
1307
+ "处理数据流的主体": "處理數據流的主體",
1308
+ "综合": "綜合",
1309
+ "感叹号": "感嘆號",
1310
+ "浮点数": "浮點數",
1311
+ "必要时再进行切割": "必要時再進行切割",
1312
+ "请注意proxies选项的格式": "請注意proxies選項的格式",
1313
+ "我需要你找一张网络图片": "我需要你找一張網絡圖片",
1314
+ "裁剪输入": "裁剪輸入",
1315
+ "这里其实不需要join了": "這裡其實不需要join了",
1316
+ "例如 v2**y 和 ss* 的默认本地协议是socks5h": "例如 v2**y 和 ss* 的默認本地協議是socks5h",
1317
+ "粉红色": "粉紅色",
1318
+ "llm_kwargs参数": "llm_kwargs參數",
1319
+ "设置gradio的并行线程数": "設置gradio的並行線程數",
1320
+ "端口": "端口",
1321
+ "将每个换行符替换为两个换行符": "將每個換行符替換為兩個換行符",
1322
+ "防止回答时Token溢出": "防止回答時Token溢出",
1323
+ "单线": "單線",
1324
+ "成功读取环境变量": "成功讀取環境變量",
1325
+ "GPT返回的结果": "GPT返回的結果",
1326
+ "函数插件功能": "函數插件功能",
1327
+ "根据前后相邻字符的特点": "根據前後相鄰字符的特點",
1328
+ "发送到chatgpt进行分析": "發送到chatgpt進行分析",
1329
+ "例如": "例如",
1330
+ "翻译": "翻譯",
1331
+ "选择放弃": "選擇放棄",
1332
+ "将输出代码片段的“后面的": "將輸出代碼片段的“後面的",
1333
+ "两个指令来安装jittorllms的依赖": "兩個指令來安裝jittorllms的依賴",
1334
+ "不在arxiv中无法获取完整摘要": "無法在arxiv中取得完整摘要",
1335
+ "读取默认值作为数据类型转换的参考": "讀取預設值作為資料型態轉換的參考",
1336
+ "最后": "最後",
1337
+ "用于负责跨越线程传递已经输出的部分": "用於負責跨越線程傳遞已經輸出的部分",
1338
+ "请避免混用多种jittor模型": "請避免混用多種jittor模型",
1339
+ "等待输入": "等待輸入",
1340
+ "默认": "預設",
1341
+ "读取PDF文件": "讀取PDF文件",
1342
+ "作为一名中文学术论文写作改进助理": "作為一名中文學術論文寫作改進助理",
1343
+ "如果WEB_PORT是-1": "如果WEB_PORT是-1",
1344
+ "虽然不同的代理软件界面不一样": "雖然不同的代理軟體介面不一樣",
1345
+ "选择LLM模型": "選擇LLM模型",
1346
+ "回车退出": "按Enter退出",
1347
+ "第3步": "��3步",
1348
+ "找到原文本中的换行符": "找到原文本中的換行符號",
1349
+ "表示文件所在的文件夹路径": "表示文件所在的資料夾路徑",
1350
+ "您可以请再次尝试.": "您可以請再次嘗試。",
1351
+ "其他小工具": "其他小工具",
1352
+ "开始问问题": "開始問問題",
1353
+ "默认值": "預設值",
1354
+ "正在获取文献名!": "正在獲取文獻名稱!",
1355
+ "也可以在问题输入区输入临时的api-key": "也可以在問題輸入區輸入臨時的api-key",
1356
+ "单$包裹begin命令时多余": "單$包裹begin命令時多餘",
1357
+ "从而达到实时更新功能": "從而達到實時更新功能",
1358
+ "开始接收jittorllms的回复": "開始接收jittorllms的回覆",
1359
+ "防止爆token": "防止爆token",
1360
+ "等待重试": "等待重試",
1361
+ "解析整个Go项目": "解析整個Go項目",
1362
+ "解析整个Rust项目": "解析整個Rust項目",
1363
+ "则随机选取WEB端口": "則隨機選取WEB端口",
1364
+ "不输入代表全部匹配": "不輸入代表全部匹配",
1365
+ "在前端打印些好玩的东西": "在前端打印些好玩的東西",
1366
+ "而在上下文执行结束时": "而在上下文執行結束時",
1367
+ "会自动使用已配置的代理": "會自動使用已配置的代理",
1368
+ "第 3 步": "第 3 步",
1369
+ "稍微留一点余地": "稍微留一點余地",
1370
+ "靛蓝色": "靛藍色",
1371
+ "改变输入参数的顺序与结构": "改變輸入參數的順序與結構",
1372
+ "中提取出“标题”、“收录会议或期刊”等基本信息": "中提取出“標題”、“收錄會議或期刊”等基本信息",
1373
+ "刷新界面用 yield from update_ui": "刷新界面用 yield from update_ui",
1374
+ "下载编号": "下載編號",
1375
+ "来自EdgeGPT.py": "來自EdgeGPT.py",
1376
+ "每个子任务的输出汇总": "每個子任務的輸出匯總",
1377
+ "你是一位专业的中文学术论文作家": "你是一位專業的中文學術論文作家",
1378
+ "加了^代表不匹配": "加了^代表不匹配",
1379
+ "则覆盖原config文件": "則覆蓋原config文件",
1380
+ "提交按钮、重置按钮": "提交按鈕、重置按鈕",
1381
+ "对程序的整体功能和构架重新做出概括": "對程式的整體功能和架構重新做出概述",
1382
+ "未配置": "未配置",
1383
+ "文本过长将进行截断": "文本過長將進行截斷",
1384
+ "将英文句号": "將英文句號",
1385
+ "则使用当前时间生成文件名": "則使用當前時間生成檔名",
1386
+ "或显存": "或顯存",
1387
+ "请只提供文本的更正版本": "請只提供文本的更正版本",
1388
+ "大部分时候仅仅为了fancy的视觉效果": "大部分時候僅僅為了fancy的視覺效果",
1389
+ "不能达到预期效果": "不能達到預期效果",
1390
+ "css等": "css等",
1391
+ "该函数只有20多行代码": "該函數只有20多行程式碼",
1392
+ "以下是一篇学术论文中的一段内容": "以下是一篇學術論文中的一段內容",
1393
+ "Markdown/Readme英译中": "Markdown/Readme英譯中",
1394
+ "递归搜索": "遞歸搜尋",
1395
+ "检查一下是不是忘了改config": "檢查一下是不是忘了改config",
1396
+ "不需要修改": "不需要修改",
1397
+ "请求GPT模型同时维持用户界面活跃": "請求GPT模型同時維持用戶界面活躍",
1398
+ "是本次输入": "是本次輸入",
1399
+ "随便切一下敷衍吧": "隨便切一下敷衍吧",
1400
+ "紫罗兰色": "紫羅蘭色",
1401
+ "显示/隐藏功能区": "顯示/隱藏功能區",
1402
+ "加入下拉菜单中": "加入下拉菜單中",
1403
+ "等待ChatGLM响应中": "等待ChatGLM響應中",
1404
+ "代码已经更新": "代碼已經更新",
1405
+ "总结文章": "總結文章",
1406
+ "正常": "正常",
1407
+ "降低请求频率中": "降低請求頻率中",
1408
+ "3. 根据 heuristic 规则判断换行符是否是段落分隔": "3. 根據heuristic規則判斷換行符是否是段落分隔",
1409
+ "整理反复出现的控件句柄组合": "整理反復出現的控件句柄組合",
1410
+ "则给出安装建议": "則給出安裝建議",
1411
+ "我们先及时地做一次界面更新": "我們先及時地做一次界面更新",
1412
+ "数据流的显示最后收到的多少个字符": "數據流的顯示最後收到的多少個字符",
1413
+ "并将输出部分的Markdown和数学公式转换为HTML格式": "並將輸出部分的Markdown和數學公式轉換為HTML格式",
1414
+ "rar和7z格式正常": "rar和7z格式正常",
1415
+ "代码高亮": "程式碼高亮",
1416
+ "和 __exit__": "和 __exit__",
1417
+ "黄色": "黃色",
1418
+ "使用线程池": "使用線程池",
1419
+ "的主要内容": "的主要內容",
1420
+ "定义注释的正则表达式": "定義註釋的正則表達式",
1421
+ "Reduce the length. 本次输入过长": "減少長度。本次輸入過長",
1422
+ "具备多线程调用能力的函数": "具備多線程調用能力的函數",
1423
+ "你是一个程序架构分析师": "你是一個程式架構分析師",
1424
+ "MOSS尚未加载": "MOSS尚未載入",
1425
+ "环境变量": "環境變數",
1426
+ "请分析此页面中出现的所有文章": "請分���此頁面中出現的所有文章",
1427
+ "只裁剪历史": "只裁剪歷史",
1428
+ "在结束时": "在結束時",
1429
+ "缺一不可": "缺一不可",
1430
+ "第10步": "第10步",
1431
+ "安全第一条": "安全第一條",
1432
+ "解释代码": "解釋程式碼",
1433
+ "地址": "地址",
1434
+ "全部文件解析完成": "全部檔案解析完成",
1435
+ "乱七八糟的后处理": "亂七八糟的後處理",
1436
+ "输入时用逗号隔开": "輸入時用逗號隔開",
1437
+ "对最相关的两个搜索结果进行总结": "對最相關的兩個搜索結果進行總結",
1438
+ "第": "第",
1439
+ "清空历史": "清空歷史",
1440
+ "引用次数是链接中的文本": "引用次數是鏈接中的文本",
1441
+ "时": "時",
1442
+ "如没有给定输入参数": "如沒有給定輸入參數",
1443
+ "与gradio版本和网络都相关": "與gradio版本和網絡都相關",
1444
+ "润色": "潤色",
1445
+ "青蓝色": "青藍色",
1446
+ "如果浏览器没有自动打开": "如果瀏覽器沒有自動打開",
1447
+ "新功能": "新功能",
1448
+ "会把traceback和已经接收的数据转入输出": "會把traceback和已經接收的數據轉入輸出",
1449
+ "在这里输入分辨率": "在這裡輸入分辨率",
1450
+ "至少一个线程任务意外失败": "至少一個線程任務意外失敗",
1451
+ "子进程Worker": "子進程Worker",
1452
+ "使用yield from语句返回重新加载过的函数": "使用yield from語句返回重新加載過的函數",
1453
+ "网络等出问题时": "網絡等出問題時",
1454
+ "does not exist. 模型不存在": "不存在該模型",
1455
+ "本地LLM模型如ChatGLM的执行方式 CPU/GPU": "本地LLM模型如ChatGLM的執行方式 CPU/GPU",
1456
+ "如果选择自动处理": "如果選擇自動處理",
1457
+ "找不到本地项目或无权访问": "找不到本地專案或無權訪問",
1458
+ "是否在arxiv中": "是否在arxiv中",
1459
+ "版": "版",
1460
+ "数据流的第一帧不携带content": "數據流的第一幀不攜帶content",
1461
+ "OpenAI和API2D不会走这里": "OpenAI和API2D不會走這裡",
1462
+ "请编辑以下文本": "請編輯以下文本",
1463
+ "尽可能多地保留文本": "盡可能多地保留文本",
1464
+ "将文本按照段落分隔符分割开": "將文本按照段落分隔符分割開",
1465
+ "获取成功": "獲取成功",
1466
+ "然后回答问题": "然後回答問題",
1467
+ "同时分解长句": "同時分解長句",
1468
+ "刷新时间间隔频率": "刷新時間間隔頻率",
1469
+ "您可以将任意一个文件路径粘贴到输入区": "您可以將任意一個文件路徑粘貼到輸入區",
1470
+ "需要手动安装新增的依赖库": "需要手動安裝新增的依賴庫",
1471
+ "的模板": "的模板",
1472
+ "重命名文件": "重命名文件",
1473
+ "第1步": "第1步",
1474
+ "只输出代码": "只輸出代碼",
1475
+ "准备对工程源代码进行汇总分析": "準備對工程源代碼進行匯總分析",
1476
+ "是所有LLM的通用接口": "是所有LLM的通用接口",
1477
+ "等待回复": "等待回覆",
1478
+ "此线程失败前收到的回答": "此線程失敗前收到的回答",
1479
+ "Call ChatGLM fail 不能正常加载ChatGLM的参数": "呼叫ChatGLM失敗,無法正常加載ChatGLM的參數",
1480
+ "输入参数 Args": "輸入參數Args",
1481
+ "也可以获取它": "也可以獲取它",
1482
+ "请求GPT模型的": "請求GPT模型的",
1483
+ "您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!": "您將把您的API-KEY和對話隱私完全暴露給您設定的中間人!",
1484
+ "等待MOSS响应中": "等待MOSS響應中",
1485
+ "文件保存到本地": "文件保存到本地",
1486
+ "例如需要翻译的一段话": "例如需要翻譯的一段話",
1487
+ "避免解析压缩文件": "避免解析壓縮文件",
1488
+ "另外您可以随时在history子文件夹下找回旧版的程序": "另外您可以隨時在history子文件夾下找回舊版的程式",
1489
+ "由于您没有设置config_private.py私密配置": "由於您沒有設置config_private.py私密配置",
1490
+ "缺少ChatGLM的依赖": "缺少ChatGLM的依賴",
1491
+ "试着补上后个": "試著補上後個",
1492
+ "如果是网络上的文件": "如果是網路上的檔案",
1493
+ "找不到任何.tex或pdf文件": "找不到任何.tex或pdf檔案",
1494
+ "直到历史记录的标记数量降低到阈值以下": "直到歷史記錄的標記數量降低到閾值以下",
1495
+ "当代码输出半截的时候": "當程式碼輸出一半時",
1496
+ "输入区2": "輸入區2",
1497
+ "则删除报错信息": "則刪除錯誤訊息",
1498
+ "如果需要使用newbing": "如果需要使用newbing",
1499
+ "迭代之前的分析": "迭代之前的分析",
1500
+ "单线程方法": "單線程方法",
1501
+ "装载请求内容": "載入請求內容",
1502
+ "翻译为中文": "翻譯為中文",
1503
+ "以及代理设置的格式是否正确": "以及代理設置的格式是否正確",
1504
+ "石头色": "石頭色",
1505
+ "输入谷歌学术搜索页url": "輸入谷歌學術搜索頁URL",
1506
+ "可选 ↓↓↓": "可選 ↓↓↓",
1507
+ "再点击按钮": "再點擊按鈕",
1508
+ "开发者们❤️": "開發者們❤️",
1509
+ "若再次失败则更可能是因为输入过长.": "若再次失敗則更可能是因為輸入過長。",
1510
+ "载入对话": "載入對話",
1511
+ "包括": "包括",
1512
+ "或者": "或者",
1513
+ "并执行函数的新版本": "並執行函數的新版本",
1514
+ "论文": "論文"
1515
+ }
docs/waifu_plugin/autoload.js ADDED
@@ -0,0 +1,30 @@
1
+ try {
2
+ $("<link>").attr({href: "file=docs/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css"}).appendTo('head');
3
+ $('body').append('<div class="waifu"><div class="waifu-tips"></div><canvas id="live2d" class="live2d"></canvas><div class="waifu-tool"><span class="fui-home"></span> <span class="fui-chat"></span> <span class="fui-eye"></span> <span class="fui-user"></span> <span class="fui-photo"></span> <span class="fui-info-circle"></span> <span class="fui-cross"></span></div></div>'); // insert the waifu container, Live2D canvas and toolbar
4
+ $.ajax({url: "file=docs/waifu_plugin/waifu-tips.js", dataType:"script", cache: true, success: function() {
5
+ $.ajax({url: "file=docs/waifu_plugin/live2d.js", dataType:"script", cache: true, success: function() {
6
+ /* The parameters below can be modified directly */
7
+ live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // Hitokoto quote API
8
+ live2d_settings['modelId'] = 5; // default model ID
9
+ live2d_settings['modelTexturesId'] = 1; // default texture ID
10
+ live2d_settings['modelStorage'] = false; // do not persist the model ID
11
+ live2d_settings['waifuSize'] = '210x187';
12
+ live2d_settings['waifuTipsSize'] = '187x52';
13
+ live2d_settings['canSwitchModel'] = true;
14
+ live2d_settings['canSwitchTextures'] = true;
15
+ live2d_settings['canSwitchHitokoto'] = false;
16
+ live2d_settings['canTakeScreenshot'] = false;
17
+ live2d_settings['canTurnToHomePage'] = false;
18
+ live2d_settings['canTurnToAboutPage'] = false;
19
+ live2d_settings['showHitokoto'] = false; // show hitokoto quotes
20
+ live2d_settings['showF12Status'] = false; // show loading status
21
+ live2d_settings['showF12Message'] = false; // show waifu messages
22
+ live2d_settings['showF12OpenMsg'] = false; // show a hint when the console is opened
23
+ live2d_settings['showCopyMessage'] = false; // show a hint when content is copied
24
+ live2d_settings['showWelcomeMessage'] = true; // show a welcome message on entering the page
25
+
26
+ /* Set before calling initModel */
27
+ initModel("file=docs/waifu_plugin/waifu-tips.json");
28
+ }});
29
+ }});
30
+ } catch(err) { console.log("[Error] jQuery is not defined.") }
docs/waifu_plugin/flat-ui-icons-regular.eot ADDED
Binary file (25.9 kB). View file
 
docs/waifu_plugin/flat-ui-icons-regular.svg ADDED
docs/waifu_plugin/flat-ui-icons-regular.ttf ADDED
Binary file (25.7 kB). View file
 
docs/waifu_plugin/flat-ui-icons-regular.woff ADDED
Binary file (17.8 kB). View file
 
docs/waifu_plugin/jquery-ui.min.js ADDED
The diff for this file is too large to render. See raw diff
 
docs/waifu_plugin/jquery.min.js ADDED
@@ -0,0 +1,4 @@
1
+ /*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
2
+ !function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return 
a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+L+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return 
I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="<div class='a'></div><div class='a i'></div>",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="<select msallowclip=''><option selected=''></option></select>",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var 
b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+M+")"+a+"("+M+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||"")||fb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var 
b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=lb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=mb(b);function pb(){}pb.prototype=d.filters=d.pseudos,d.setFilters=new pb,g=fb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fb.error(a):z(a,i).slice(0)};function qb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||tb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ib(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||jb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||jb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return null==a.getAttribute("disabled")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return 
this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof 
b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;
3
+ if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?m.queue(this[0],a):void 0===b?this:this.each(function(){var c=m.queue(this,a,b);m._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&m.dequeue(this,a)})},dequeue:function(a){return this.each(function(){m.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=m.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=m._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=["Top","Right","Bottom","Left"],U=function(a,b){return a=b||a,"none"===m.css(a,"display")||!m.contains(a.ownerDocument,a)},V=m.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===m.type(c)){e=!0;for(h in c)m.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,m.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(m(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var 
a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav></:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="<input type='radio' checked='checked' name='t'/>",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new 
RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[m.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=Z.test(e)?this.mouseHooks:Y.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new m.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=f.srcElement||y),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,g.filter?g.filter(a,f):a},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button,g=b.fromElement;return null==a.pageX&&null!=b.clientX&&(d=a.target.ownerDocument||y,e=d.documentElement,c=d.body,a.pageX=b.clientX+(e&&e.scrollLeft||c&&c.scrollLeft||0)-(e&&e.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||c&&c.scrollTop||0)-(e&&e.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&g&&(a.relatedTarget=g===a.target?b.toElement:g),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==cb()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===cb()&&this.blur?(this.blur(),!1):void 
0},delegateType:"focusout"},click:{trigger:function(){return m.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return m.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=m.extend(new m.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?m.event.trigger(e,null,b):m.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},m.removeEvent=y.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){var d="on"+b;a.detachEvent&&(typeof a[d]===K&&(a[d]=null),a.detachEvent(d,c))},m.Event=function(a,b){return this instanceof m.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ab:bb):this.type=a,b&&m.extend(this,b),this.timeStamp=a&&a.timeStamp||m.now(),void(this[m.expando]=!0)):new m.Event(a,b)},m.Event.prototype={isDefaultPrevented:bb,isPropagationStopped:bb,isImmediatePropagationStopped:bb,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ab,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=ab,a&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ab,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},m.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){m.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!m.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.submitBubbles||(m.event.special.submit={setup:function(){return m.nodeName(this,"form")?!1:void m.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=m.nodeName(b,"input")||m.nodeName(b,"button")?b.form:void 0;c&&!m._data(c,"submitBubbles")&&(m.event.add(c,"submit._submit",function(a){a._submit_bubble=!0}),m._data(c,"submitBubbles",!0))})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&m.event.simulate("submit",this.parentNode,a,!0))},teardown:function(){return m.nodeName(this,"form")?!1:void m.event.remove(this,"._submit")}}),k.changeBubbles||(m.event.special.change={setup:function(){return X.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(m.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._just_changed=!0)}),m.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1),m.event.simulate("change",this,a,!0)})),!1):void m.event.add(this,"beforeactivate._change",function(a){var b=a.target;X.test(b.nodeName)&&!m._data(b,"changeBubbles")&&(m.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||m.event.simulate("change",this.parentNode,a,!0)}),m._data(b,"changeBubbles",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return 
m.event.remove(this,"._change"),!X.test(this.nodeName)}}),k.focusinBubbles||m.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){m.event.simulate(b,a.target,m.event.fix(a),!0)};m.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=m._data(d,b);e||d.addEventListener(a,c,!0),m._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=m._data(d,b)-1;e?m._data(d,b,e):(d.removeEventListener(a,c,!0),m._removeData(d,b))}}}),m.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(f in a)this.on(f,b,c,a[f],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=bb;else if(!d)return this;return 1===e&&(g=d,d=function(a){return m().off(a),g.apply(this,arguments)},d.guid=g.guid||(g.guid=m.guid++)),this.each(function(){m.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,m(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=bb),this.each(function(){m.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){m.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?m.event.trigger(a,b,c,!0):void 0}});function db(a){var b=eb.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}var eb="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",fb=/ jQuery\d+="(?:null|\d+)"/g,gb=new RegExp("<(?:"+eb+")[\\s/>]","i"),hb=/^\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,jb=/<([\w:]+)/,kb=/<tbody/i,lb=/<|&#?\w+;/,mb=/<(?:script|style|link)/i,nb=/checked\s*(?:[^=]|=\s*.checked.)/i,ob=/^$|\/(?:java|ecma)script/i,pb=/^true\/(.*)/,qb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,rb={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:k.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},sb=db(y),tb=sb.appendChild(y.createElement("div"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function 
Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,"script"),d.length>0&&zb(d,!i&&ub(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(jb.exec(f)||["",""])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,"<$1></$2>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f="table"!==i||kb.test(f)?"<table>"!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,"input"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),"script"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var 
c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,""):void 0;if(!("string"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ib,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,"script"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qb,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(Cb||m("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Cb[0].contentWindow||Cb[0].contentDocument).document,b.write(),b.close(),c=Eb(a,b),Cb.detach()),Db[a]=c),c}!function(){var a;k.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,d;return c=y.getElementsByTagName("body")[0],c&&c.style?(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(y.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(d),a):void 0}}();var Gb=/^margin/,Hb=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ib,Jb,Kb=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ib=function(a){return 
a.ownerDocument.defaultView.getComputedStyle(a,null)},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(""!==g||m.contains(a.ownerDocument,a)||(g=m.style(a,b)),Hb.test(g)&&Gb.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+""}):y.documentElement.currentStyle&&(Ib=function(a){return a.currentStyle},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Hb.test(g)&&!Kb.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 0===g?g:g+""||"auto"});function Lb(a,b){return{get:function(){var c=a();if(null!=c)return c?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d,e,f,g,h;if(b=y.createElement("div"),b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=d&&d.style){c.cssText="float:left;opacity:.5",k.opacity="0.5"===c.opacity,k.cssFloat=!!c.cssFloat,b.style.backgroundClip="content-box",b.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===b.style.backgroundClip,k.boxSizing=""===c.boxSizing||""===c.MozBoxSizing||""===c.WebkitBoxSizing,m.extend(k,{reliableHiddenOffsets:function(){return null==g&&i(),g},boxSizingReliable:function(){return null==f&&i(),f},pixelPosition:function(){return null==e&&i(),e},reliableMarginRight:function(){return null==h&&i(),h}});function i(){var b,c,d,i;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),b.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",e=f=!1,h=!0,a.getComputedStyle&&(e="1%"!==(a.getComputedStyle(b,null)||{}).top,f="4px"===(a.getComputedStyle(b,null)||{width:"4px"}).width,i=b.appendChild(y.createElement("div")),i.style.cssText=b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",i.style.marginRight=i.style.width="0",b.style.width="1px",h=!parseFloat((a.getComputedStyle(i,null)||{}).marginRight)),b.innerHTML="<table><tr><td></td><td>t</td></tr></table>",i=b.getElementsByTagName("td"),i[0].style.cssText="margin:0;border:0;padding:0;display:none",g=0===i[0].offsetHeight,g&&(i[0].style.display="",i[1].style.display="none",g=0===i[0].offsetHeight),c.removeChild(d))}}}(),m.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var Mb=/alpha\([^)]*\)/i,Nb=/opacity\s*=\s*([^)]*)/,Ob=/^(none|table(?!-c[ea]).+)/,Pb=new RegExp("^("+S+")(.*)$","i"),Qb=new RegExp("^([+-])=("+S+")","i"),Rb={position:"absolute",visibility:"hidden",display:"block"},Sb={letterSpacing:"0",fontWeight:"400"},Tb=["Webkit","O","Moz","ms"];function Ub(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.slice(1),d=b,e=Tb.length;while(e--)if(b=Tb[e]+c,b in a)return b;return d}function Vb(a,b){for(var 
c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=m._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&U(d)&&(f[g]=m._data(d,"olddisplay",Fb(d.nodeName)))):(e=U(d),(c&&"none"!==c||!e)&&m._data(d,"olddisplay",e?c:m.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function Wb(a,b,c){var d=Pb.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Xb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=m.css(a,c+T[f],!0,e)),d?("content"===c&&(g-=m.css(a,"padding"+T[f],!0,e)),"margin"!==c&&(g-=m.css(a,"border"+T[f]+"Width",!0,e))):(g+=m.css(a,"padding"+T[f],!0,e),"padding"!==c&&(g+=m.css(a,"border"+T[f]+"Width",!0,e)));return g}function Yb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ib(a),g=k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Jb(a,b,f),(0>e||null==e)&&(e=a.style[b]),Hb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Xb(a,b,c||(g?"border":"content"),d,f)+"px"}m.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Jb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":k.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=m.camelCase(b),i=a.style;if(b=m.cssProps[h]||(m.cssProps[h]=Ub(i,h)),g=m.cssHooks[b]||m.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=Qb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(m.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||m.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=m.camelCase(b);return b=m.cssProps[h]||(m.cssProps[h]=Ub(a.style,h)),g=m.cssHooks[b]||m.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Jb(a,b,d)),"normal"===f&&b in Sb&&(f=Sb[b]),""===c||c?(e=parseFloat(f),c===!0||m.isNumeric(e)?e||0:f):f}}),m.each(["height","width"],function(a,b){m.cssHooks[b]={get:function(a,c,d){return c?Ob.test(m.css(a,"display"))&&0===a.offsetWidth?m.swap(a,Rb,function(){return Yb(a,b,d)}):Yb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ib(a);return Wb(a,c,d?Xb(a,b,d,k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,e),e):0)}}}),k.opacity||(m.cssHooks.opacity={get:function(a,b){return Nb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=m.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===m.trim(f.replace(Mb,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Mb.test(f)?f.replace(Mb,e):f+" "+e)}}),m.cssHooks.marginRight=Lb(k.reliableMarginRight,function(a,b){return b?m.swap(a,{display:"inline-block"},Jb,[a,"marginRight"]):void 0}),m.each({margin:"",padding:"",border:"Width"},function(a,b){m.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+T[d]+b]=f[d]||f[d-2]||f[0];return e}},Gb.test(a)||(m.cssHooks[a+b].set=Wb)}),m.fn.extend({css:function(a,b){return V(this,function(a,b,c){var 
d,e,f={},g=0;if(m.isArray(b)){for(d=Ib(a),e=b.length;e>g;g++)f[b[g]]=m.css(a,b[g],!1,d);return f}return void 0!==c?m.style(a,b,c):m.css(a,b)},a,b,arguments.length>1)},show:function(){return Vb(this,!0)},hide:function(){return Vb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){U(this)?m(this).show():m(this).hide()})}});function Zb(a,b,c,d,e){return new Zb.prototype.init(a,b,c,d,e)}m.Tween=Zb,Zb.prototype={constructor:Zb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(m.cssNumber[c]?"":"px")
4
+ },cur:function(){var a=Zb.propHooks[this.prop];return a&&a.get?a.get(this):Zb.propHooks._default.get(this)},run:function(a){var b,c=Zb.propHooks[this.prop];return this.pos=b=this.options.duration?m.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Zb.propHooks._default.set(this),this}},Zb.prototype.init.prototype=Zb.prototype,Zb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=m.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){m.fx.step[a.prop]?m.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[m.cssProps[a.prop]]||m.cssHooks[a.prop])?m.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Zb.propHooks.scrollTop=Zb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},m.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},m.fx=Zb.prototype.init,m.fx.step={};var $b,_b,ac=/^(?:toggle|show|hide)$/,bc=new RegExp("^(?:([+-])=|)("+S+")([a-z%]*)$","i"),cc=/queueHooks$/,dc=[ic],ec={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=bc.exec(b),f=e&&e[3]||(m.cssNumber[a]?"":"px"),g=(m.cssNumber[a]||"px"!==f&&+d)&&bc.exec(m.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,m.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function fc(){return setTimeout(function(){$b=void 0}),$b=m.now()}function gc(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d["margin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function hc(a,b,c){for(var d,e=(ec[b]||[]).concat(ec["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ic(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeType&&U(a),r=m._data(a,"fxshow");c.queue||(h=m._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,n.always(function(){n.always(function(){h.unqueued--,m.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=m.css(a,"display"),l="none"===j?m._data(a,"olddisplay")||Fb(a.nodeName):j,"inline"===l&&"none"===m.css(a,"float")&&(k.inlineBlockNeedsLayout&&"inline"!==Fb(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",k.shrinkWrapBlocks()||n.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],ac.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||m.style(a,d)}else j=void 0;if(m.isEmptyObject(o))"inline"===("none"===j?Fb(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=m._data(a,"fxshow",{}),f&&(r.hidden=!q),q?m(a).show():n.done(function(){m(a).hide()}),n.done(function(){var b;m._removeData(a,"fxshow");for(b in o)m.style(a,b,o[b])});for(d in o)g=hc(q?r[d]:0,d,n),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function jc(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a[c],m.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=m.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function kc(a,b,c){var 
d,e,f=0,g=dc.length,h=m.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=$b||fc(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:m.extend({},b),opts:m.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:$b||fc(),duration:c.duration,tweens:[],createTween:function(b,c){var d=m.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(jc(k,j.opts.specialEasing);g>f;f++)if(d=dc[f].call(j,a,k,j.opts))return d;return m.map(k,hc,j),m.isFunction(j.opts.start)&&j.opts.start.call(a,j),m.fx.timer(m.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}m.Animation=m.extend(kc,{tweener:function(a,b){m.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],ec[c]=ec[c]||[],ec[c].unshift(b)},prefilter:function(a,b){b?dc.unshift(a):dc.push(a)}}),m.speed=function(a,b,c){var d=a&&"object"==typeof a?m.extend({},a):{complete:c||!c&&b||m.isFunction(a)&&a,duration:a,easing:c&&b||b&&!m.isFunction(b)&&b};return d.duration=m.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in m.fx.speeds?m.fx.speeds[d.duration]:m.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){m.isFunction(d.old)&&d.old.call(this),d.queue&&m.dequeue(this,d.queue)},d},m.fn.extend({fadeTo:function(a,b,c,d){return this.filter(U).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=m.isEmptyObject(a),f=m.speed(b,c,d),g=function(){var b=kc(this,m.extend({},a),f);(e||m._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=m.timers,g=m._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&cc.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&m.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=m._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=m.timers,g=d?d.length:0;for(c.finish=!0,m.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),m.each(["toggle","show","hide"],function(a,b){var c=m.fn[b];m.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(gc(b,!0),a,d,e)}}),m.each({slideDown:gc("show"),slideUp:gc("hide"),slideToggle:gc("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){m.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),m.timers=[],m.fx.tick=function(){var a,b=m.timers,c=0;for($b=m.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||m.fx.stop(),$b=void 
0},m.fx.timer=function(a){m.timers.push(a),a()?m.fx.start():m.timers.pop()},m.fx.interval=13,m.fx.start=function(){_b||(_b=setInterval(m.fx.tick,m.fx.interval))},m.fx.stop=function(){clearInterval(_b),_b=null},m.fx.speeds={slow:600,fast:200,_default:400},m.fn.delay=function(a,b){return a=m.fx?m.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a,b,c,d,e;b=y.createElement("div"),b.setAttribute("className","t"),b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=y.createElement("select"),e=c.appendChild(y.createElement("option")),a=b.getElementsByTagName("input")[0],d.style.cssText="top:1px",k.getSetAttribute="t"!==b.className,k.style=/top/.test(d.getAttribute("style")),k.hrefNormalized="/a"===d.getAttribute("href"),k.checkOn=!!a.value,k.optSelected=e.selected,k.enctype=!!y.createElement("form").enctype,c.disabled=!0,k.optDisabled=!e.disabled,a=y.createElement("input"),a.setAttribute("value",""),k.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),k.radioValue="t"===a.value}();var lc=/\r/g;m.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=m.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,m(this).val()):a,null==e?e="":"number"==typeof e?e+="":m.isArray(e)&&(e=m.map(e,function(a){return null==a?"":a+""})),b=m.valHooks[this.type]||m.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=m.valHooks[e.type]||m.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(lc,""):null==c?"":c)}}}),m.extend({valHooks:{option:{get:function(a){var b=m.find.attr(a,"value");return null!=b?b:m.trim(m.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&m.nodeName(c.parentNode,"optgroup"))){if(b=m(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=m.makeArray(b),g=e.length;while(g--)if(d=e[g],m.inArray(m.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),m.each(["radio","checkbox"],function(){m.valHooks[this]={set:function(a,b){return m.isArray(b)?a.checked=m.inArray(m(a).val(),b)>=0:void 0}},k.checkOn||(m.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var mc,nc,oc=m.expr.attrHandle,pc=/^(?:checked|selected)$/i,qc=k.getSetAttribute,rc=k.input;m.fn.extend({attr:function(a,b){return V(this,m.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){m.removeAttr(this,a)})}}),m.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===K?m.prop(a,b,c):(1===f&&m.isXMLDoc(a)||(b=b.toLowerCase(),d=m.attrHooks[b]||(m.expr.match.bool.test(b)?nc:mc)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=m.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void m.removeAttr(a,b))},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=m.propFix[c]||c,m.expr.match.bool.test(c)?rc&&qc||!pc.test(c)?a[d]=!1:a[m.camelCase("default-"+c)]=a[d]=!1:m.attr(a,c,""),a.removeAttribute(qc?c:d)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&m.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),nc={set:function(a,b,c){return b===!1?m.removeAttr(a,c):rc&&qc||!pc.test(c)?a.setAttribute(!qc&&m.propFix[c]||c,c):a[m.camelCase("default-"+c)]=a[c]=!0,c}},m.each(m.expr.match.bool.source.match(/\w+/g),function(a,b){var c=oc[b]||m.find.attr;oc[b]=rc&&qc||!pc.test(b)?function(a,b,d){var e,f;return d||(f=oc[b],oc[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,oc[b]=f),e}:function(a,b,c){return c?void 0:a[m.camelCase("default-"+b)]?b.toLowerCase():null}}),rc&&qc||(m.attrHooks.value={set:function(a,b,c){return m.nodeName(a,"input")?void(a.defaultValue=b):mc&&mc.set(a,b,c)}}),qc||(mc={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},oc.id=oc.name=oc.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},m.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:mc.set},m.attrHooks.contenteditable={set:function(a,b,c){mc.set(a,""===b?!1:b,c)}},m.each(["width","height"],function(a,b){m.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),k.style||(m.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var sc=/^(?:input|select|textarea|button|object)$/i,tc=/^(?:a|area)$/i;m.fn.extend({prop:function(a,b){return V(this,m.prop,a,b,arguments.length>1)},removeProp:function(a){return a=m.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),m.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!m.isXMLDoc(a),f&&(b=m.propFix[b]||b,e=m.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=m.find.attr(a,"tabindex");return b?parseInt(b,10):sc.test(a.nodeName)||tc.test(a.nodeName)&&a.href?0:-1}}}}),k.hrefNormalized||m.each(["href","src"],function(a,b){m.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),k.optSelected||(m.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),m.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){m.propFix[this.toLowerCase()]=this}),k.enctype||(m.propFix.enctype="encoding");var uc=/[\t\r\n\f]/g;m.fn.extend({addClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j="string"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).addClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=m.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=0===arguments.length||"string"==typeof a&&a;if(m.isFunction(a))return 
this.each(function(b){m(this).removeClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?m.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(m.isFunction(a)?function(c){m(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=m(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===K||"boolean"===c)&&(this.className&&m._data(this,"__className__",this.className),this.className=this.className||a===!1?"":m._data(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(uc," ").indexOf(b)>=0)return!0;return!1}}),m.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){m.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),m.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var vc=m.now(),wc=/\?/,xc=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;m.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=m.trim(b+"");return e&&!m.trim(e.replace(xc,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():m.error("Invalid JSON: "+b)},m.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new DOMParser,c=d.parseFromString(b,"text/xml")):(c=new ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||m.error("Invalid XML: "+b),c};var yc,zc,Ac=/#.*$/,Bc=/([?&])_=[^&]*/,Cc=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Dc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Ec=/^(?:GET|HEAD)$/,Fc=/^\/\//,Gc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Hc={},Ic={},Jc="*/".concat("*");try{zc=location.href}catch(Kc){zc=y.createElement("a"),zc.href="",zc=zc.href}yc=Gc.exec(zc.toLowerCase())||[];function Lc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(m.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Mc(a,b,c,d){var e={},f=a===Ic;function g(h){var i;return e[h]=!0,m.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Nc(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&m.extend(!0,a,c),a}function Oc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 
0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Pc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}m.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:zc,type:"GET",isLocal:Dc.test(yc[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Jc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":m.parseJSON,"text xml":m.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Nc(Nc(a,m.ajaxSettings),b):Nc(m.ajaxSettings,a)},ajaxPrefilter:Lc(Hc),ajaxTransport:Lc(Ic),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=m.ajaxSetup({},b),l=k.context||k,n=k.context&&(l.nodeType||l.jquery)?m(l):m.event,o=m.Deferred(),p=m.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!j){j={};while(b=Cc.exec(f))j[b[1].toLowerCase()]=b[2]}b=j[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return i&&i.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||zc)+"").replace(Ac,"").replace(Fc,yc[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=m.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(c=Gc.exec(k.url.toLowerCase()),k.crossDomain=!(!c||c[1]===yc[1]&&c[2]===yc[2]&&(c[3]||("http:"===c[1]?"80":"443"))===(yc[3]||("http:"===yc[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=m.param(k.data,k.traditional)),Mc(Hc,k,b,v),2===t)return v;h=k.global,h&&0===m.active++&&m.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!Ec.test(k.type),e=k.url,k.hasContent||(k.data&&(e=k.url+=(wc.test(e)?"&":"?")+k.data,delete 
k.data),k.cache===!1&&(k.url=Bc.test(e)?e.replace(Bc,"$1_="+vc++):e+(wc.test(e)?"&":"?")+"_="+vc++)),k.ifModified&&(m.lastModified[e]&&v.setRequestHeader("If-Modified-Since",m.lastModified[e]),m.etag[e]&&v.setRequestHeader("If-None-Match",m.etag[e])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+Jc+"; q=0.01":""):k.accepts["*"]);for(d in k.headers)v.setRequestHeader(d,k.headers[d]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(d in{success:1,error:1,complete:1})v[d](k[d]);if(i=Mc(Ic,k,b,v)){v.readyState=1,h&&n.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,i.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=void 0,f=d||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,c&&(u=Oc(k,v,c)),u=Pc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(m.lastModified[e]=w),w=v.getResponseHeader("etag"),w&&(m.etag[e]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,h&&n.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),h&&(n.trigger("ajaxComplete",[v,k]),--m.active||m.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return m.get(a,b,c,"json")},getScript:function(a,b){return m.get(a,void 0,b,"script")}}),m.each(["get","post"],function(a,b){m[b]=function(a,c,d,e){return m.isFunction(c)&&(e=e||d,d=c,c=void 0),m.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),m.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){m.fn[b]=function(a){return this.on(b,a)}}),m._evalUrl=function(a){return m.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},m.fn.extend({wrapAll:function(a){if(m.isFunction(a))return this.each(function(b){m(this).wrapAll(a.call(this,b))});if(this[0]){var b=m(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return this.each(m.isFunction(a)?function(b){m(this).wrapInner(a.call(this,b))}:function(){var b=m(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=m.isFunction(a);return this.each(function(c){m(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){m.nodeName(this,"body")||m(this).replaceWith(this.childNodes)}).end()}}),m.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0||!k.reliableHiddenOffsets()&&"none"===(a.style&&a.style.display||m.css(a,"display"))},m.expr.filters.visible=function(a){return!m.expr.filters.hidden(a)};var Qc=/%20/g,Rc=/\[\]$/,Sc=/\r?\n/g,Tc=/^(?:submit|button|image|reset|file)$/i,Uc=/^(?:input|select|textarea|keygen)/i;function Vc(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rc.test(a)?d(a,e):Vc(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==m.type(b))d(a,b);else for(e in b)Vc(a+"["+e+"]",b[e],c,d)}m.param=function(a,b){var 
c,d=[],e=function(a,b){b=m.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=m.ajaxSettings&&m.ajaxSettings.traditional),m.isArray(a)||a.jquery&&!m.isPlainObject(a))m.each(a,function(){e(this.name,this.value)});else for(c in a)Vc(c,a[c],b,e);return d.join("&").replace(Qc,"+")},m.fn.extend({serialize:function(){return m.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=m.prop(this,"elements");return a?m.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!m(this).is(":disabled")&&Uc.test(this.nodeName)&&!Tc.test(a)&&(this.checked||!W.test(a))}).map(function(a,b){var c=m(this).val();return null==c?null:m.isArray(c)?m.map(c,function(a){return{name:b.name,value:a.replace(Sc,"\r\n")}}):{name:b.name,value:c.replace(Sc,"\r\n")}}).get()}}),m.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return!this.isLocal&&/^(get|post|head|put|delete|options)$/i.test(this.type)&&Zc()||$c()}:Zc;var Wc=0,Xc={},Yc=m.ajaxSettings.xhr();a.ActiveXObject&&m(a).on("unload",function(){for(var a in Xc)Xc[a](void 0,!0)}),k.cors=!!Yc&&"withCredentials"in Yc,Yc=k.ajax=!!Yc,Yc&&m.ajaxTransport(function(a){if(!a.crossDomain||k.cors){var b;return{send:function(c,d){var e,f=a.xhr(),g=++Wc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)void 0!==c[e]&&f.setRequestHeader(e,c[e]+"");f.send(a.hasContent&&a.data||null),b=function(c,e){var h,i,j;if(b&&(e||4===f.readyState))if(delete Xc[g],b=void 0,f.onreadystatechange=m.noop,e)4!==f.readyState&&f.abort();else{j={},h=f.status,"string"==typeof f.responseText&&(j.text=f.responseText);try{i=f.statusText}catch(k){i=""}h||!a.isLocal||a.crossDomain?1223===h&&(h=204):h=j.text?200:404}j&&d(h,i,j,f.getAllResponseHeaders())},a.async?4===f.readyState?setTimeout(b):f.onreadystatechange=Xc[g]=b:b()},abort:function(){b&&b(void 0,!0)}}}});function Zc(){try{return new a.XMLHttpRequest}catch(b){}}function $c(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}m.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return m.globalEval(a),a}}}),m.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),m.ajaxTransport("script",function(a){if(a.crossDomain){var b,c=y.head||m("head")[0]||y.documentElement;return{send:function(d,e){b=y.createElement("script"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||e(200,"success"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var _c=[],ad=/(=)\?(?=&|$)|\?\?/;m.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=_c.pop()||m.expando+"_"+vc++;return this[a]=!0,a}}),m.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(ad.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&ad.test(b.data)&&"data");return 
h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=m.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(ad,"$1"+e):b.jsonp!==!1&&(b.url+=(wc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||m.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,_c.push(e)),g&&m.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),m.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||y;var d=u.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=m.buildFragment([a],b,e),e&&e.length&&m(e).remove(),m.merge([],d.childNodes))};var bd=m.fn.load;m.fn.load=function(a,b,c){if("string"!=typeof a&&bd)return bd.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=m.trim(a.slice(h,a.length)),a=a.slice(0,h)),m.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(f="POST"),g.length>0&&m.ajax({url:a,type:f,dataType:"html",data:b}).done(function(a){e=arguments,g.html(d?m("<div>").append(m.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,e||[a.responseText,b,a])}),this},m.expr.filters.animated=function(a){return m.grep(m.timers,function(b){return a===b.elem}).length};var cd=a.document.documentElement;function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}m.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=m.css(a,"position"),l=m(a),n={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=m.css(a,"top"),i=m.css(a,"left"),j=("absolute"===k||"fixed"===k)&&m.inArray("auto",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),m.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(n.top=b.top-h.top+g),null!=b.left&&(n.left=b.left-h.left+e),"using"in b?b.using.call(a,n):l.css(n)}},m.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){m.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,m.contains(b,e)?(typeof e.getBoundingClientRect!==K&&(d=e.getBoundingClientRect()),c=dd(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return"fixed"===m.css(d,"position")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),m.nodeName(a[0],"html")||(c=a.offset()),c.top+=m.css(a[0],"borderTopWidth",!0),c.left+=m.css(a[0],"borderLeftWidth",!0)),{top:b.top-c.top-m.css(d,"marginTop",!0),left:b.left-c.left-m.css(d,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||cd;while(a&&!m.nodeName(a,"html")&&"static"===m.css(a,"position"))a=a.offsetParent;return a||cd})}}),m.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c=/Y/.test(b);m.fn[a]=function(d){return V(this,function(a,d,e){var f=dd(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?m(f).scrollLeft():e,c?e:m(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),m.each(["top","left"],function(a,b){m.cssHooks[b]=Lb(k.pixelPosition,function(a,c){return c?(c=Jb(a,b),Hb.test(c)?m(a).position()[b]+"px":c):void 0})}),m.each({Height:"height",Width:"width"},function(a,b){m.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){m.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof 
d),g=c||(d===!0||e===!0?"margin":"border");return V(this,function(b,c,d){var e;return m.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?m.css(b,c,g):m.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),m.fn.size=function(){return this.length},m.fn.andSelf=m.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return m});var ed=a.jQuery,fd=a.$;return m.noConflict=function(b){return a.$===m&&(a.$=fd),b&&a.jQuery===m&&(a.jQuery=ed),m},typeof b===K&&(a.jQuery=a.$=m),m});
docs/waifu_plugin/live2d.js ADDED
The diff for this file is too large to render. See raw diff
 
docs/waifu_plugin/source ADDED
@@ -0,0 +1 @@
1
+ https://github.com/fghrsh/live2d_demo
docs/waifu_plugin/waifu-tips.js ADDED
@@ -0,0 +1,405 @@
1
+ window.live2d_settings = Array(); /*
2
+
3
+ く__,.ヘヽ.    / ,ー、 〉
4
+      \ ', !-─‐-i / /´
5
+       /`ー'    L//`ヽ、 Live2D 看板娘 参数设置
6
+      /  /,  /|  ,  ,    ', Version 1.4.2
7
+    イ  / /-‐/ i L_ ハ ヽ!  i Update 2018.11.12
8
+     レ ヘ 7イ`ト  レ'ァ-ト、!ハ|  |
9
+      !,/7 '0'   ´0iソ|   |   
10
+      |.从"  _   ,,,, / |./   | 网页添加 Live2D 看板娘
11
+      レ'| i>.、,,__ _,.イ /  .i  | https://www.fghrsh.net/post/123.html
12
+       レ'| | / k_7_/レ'ヽ, ハ. |
13
+        | |/i 〈|/  i ,.ヘ | i | Thanks
14
+       .|/ / i:   ヘ!  \ | journey-ad / https://github.com/journey-ad/live2d_src
15
+         kヽ>、ハ   _,.ヘ、   /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
16
+        !'〈//`T´', \ `'7'ーr' Live2d Cubism SDK WebGL 2.1 Project & All model authors.
17
+        レ'ヽL__|___i,___,ンレ|ノ
18
+          ト-,/ |___./
19
+          'ー'  !_,.:*********************************************************************************/
20
+
21
+
22
+ // 后端接口
23
+ live2d_settings['modelAPI'] = '//live2d.fghrsh.net/api/'; // 自建 API 修改这里
24
+ live2d_settings['tipsMessage'] = 'waifu-tips.json'; // 同目录下可省略路径
25
+ live2d_settings['hitokotoAPI'] = 'lwl12.com'; // 一言 API,可选 'lwl12.com', 'fghrsh.net', 'hitokoto.cn', 'jinrishici.com'(古诗词)
26
+
27
+ // 默认模型
28
+ live2d_settings['modelId'] = 1; // 默认模型 ID,可在 F12 控制台找到
29
+ live2d_settings['modelTexturesId'] = 53; // 默认材质 ID,可在 F12 控制台找到
30
+
31
+ // 工具栏设置
32
+ live2d_settings['showToolMenu'] = true; // 显示 工具栏 ,可选 true(真), false(假)
33
+ live2d_settings['canCloseLive2d'] = true; // 显示 关闭看板娘 按钮,可选 true(真), false(假)
34
+ live2d_settings['canSwitchModel'] = true; // 显示 模型切换 按钮,可选 true(真), false(假)
35
+ live2d_settings['canSwitchTextures'] = true; // 显示 材质切换 按钮,可选 true(真), false(假)
36
+ live2d_settings['canSwitchHitokoto'] = true; // 显示 一言切换 按钮,可选 true(真), false(假)
37
+ live2d_settings['canTakeScreenshot'] = true; // 显示 看板娘截图 按钮,可选 true(真), false(假)
38
+ live2d_settings['canTurnToHomePage'] = true; // 显示 返回首页 按钮,可选 true(真), false(假)
39
+ live2d_settings['canTurnToAboutPage'] = true; // 显示 跳转关于页 按钮,可选 true(真), false(假)
40
+
41
+ // 模型切换模式
42
+ live2d_settings['modelStorage'] = true; // 记录 ID (刷新后恢复),可选 true(真), false(假)
43
+ live2d_settings['modelRandMode'] = 'switch'; // 模型切换,可选 'rand'(随机), 'switch'(顺序)
44
+ live2d_settings['modelTexturesRandMode']= 'rand'; // 材质切换,可选 'rand'(随机), 'switch'(顺序)
45
+
46
+ // 提示消息选项
47
+ live2d_settings['showHitokoto'] = true; // 显示一言
48
+ live2d_settings['showF12Status'] = true; // 显示加载状态
49
+ live2d_settings['showF12Message'] = false; // 显示看板娘消息
50
+ live2d_settings['showF12OpenMsg'] = true; // 显示控制台打开提示
51
+ live2d_settings['showCopyMessage'] = true; // 显示 复制内容 提示
52
+ live2d_settings['showWelcomeMessage'] = true; // 显示进入页面欢迎词
53
+
54
+ //看板娘样式设置
55
+ live2d_settings['waifuSize'] = '280x250'; // 看板娘大小,例如 '280x250', '600x535'
56
+ live2d_settings['waifuTipsSize'] = '250x70'; // 提示框大小,例如 '250x70', '570x150'
57
+ live2d_settings['waifuFontSize'] = '12px'; // 提示框字体,例如 '12px', '30px'
58
+ live2d_settings['waifuToolFont'] = '14px'; // 工具栏字体,例如 '14px', '36px'
59
+ live2d_settings['waifuToolLine'] = '20px'; // 工具栏行高,例如 '20px', '36px'
60
+ live2d_settings['waifuToolTop'] = '0px'; // 工具栏顶部边距,例如 '0px', '-60px'
61
+ live2d_settings['waifuMinWidth'] = '768px'; // 页面小于 指定宽度 隐藏看板娘,例如 'disable'(禁用), '768px'
62
+ live2d_settings['waifuEdgeSide'] = 'left:0'; // 看板娘贴边方向,例如 'left:0'(靠左 0px), 'right:30'(靠右 30px)
63
+ live2d_settings['waifuDraggable'] = 'disable'; // 拖拽样式,例如 'disable'(禁用), 'axis-x'(只能水平拖拽), 'unlimited'(自由拖拽)
64
+ live2d_settings['waifuDraggableRevert'] = true; // 松开鼠标还原拖拽位置,可选 true(真), false(假)
65
+
66
+ // 其他杂项设置
67
+ live2d_settings['l2dVersion'] = '1.4.2'; // 当前版本
68
+ live2d_settings['l2dVerDate'] = '2018.11.12'; // 版本更新日期
69
+ live2d_settings['homePageUrl'] = 'auto'; // 主页地址,可选 'auto'(自动), '{URL 网址}'
70
+ live2d_settings['aboutPageUrl'] = 'https://www.fghrsh.net/post/123.html'; // 关于页地址, '{URL 网址}'
71
+ live2d_settings['screenshotCaptureName']= 'live2d.png'; // 看板娘截图文件名,例如 'live2d.png'
72
+
73
+ /****************************************************************************************************/
74
+
75
+ String.prototype.render = function(context) {
76
+ var tokenReg = /(\\)?\{([^\{\}\\]+)(\\)?\}/g;
77
+
78
+ return this.replace(tokenReg, function (word, slash1, token, slash2) {
79
+ if (slash1 || slash2) { return word.replace('\\', ''); }
80
+
81
+ var variables = token.replace(/\s/g, '').split('.');
82
+ var currentObject = context;
83
+ var i, length, variable;
84
+
85
+ for (i = 0, length = variables.length; i < length; ++i) {
86
+ variable = variables[i];
87
+ currentObject = currentObject[variable];
88
+ if (currentObject === undefined || currentObject === null) return '';
89
+ }
90
+ return currentObject;
91
+ });
92
+ };
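The render() helper above is a miniature path-based template engine: `{a.b}` walks dotted keys through a context object, missing keys render as an empty string, and `\{...\}` escapes literal braces. A minimal Python sketch of the same scheme, purely illustrative (the plugin itself uses the JS version above):

```python
import re

# Same token grammar as the JS regex: optional backslash, "{", path, optional
# backslash, "}". An escaped brace pair is emitted literally.
TOKEN = re.compile(r'(\\)?\{([^{}\\]+)(\\)?\}')

def render(template: str, context: dict) -> str:
    def repl(m):
        slash1, token, slash2 = m.groups()
        if slash1 or slash2:                 # escaped brace: drop the backslash
            return m.group(0).replace('\\', '')
        value = context
        for key in token.replace(' ', '').split('.'):   # walk "a.b.c" paths
            value = value.get(key) if isinstance(value, dict) else None
            if value is None:
                return ''                    # missing key renders as empty
        return str(value)
    return TOKEN.sub(repl, template)

print(render("Hello {user.name}!", {"user": {"name": "Pio"}}))  # Hello Pio!
```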
93
+
94
+ var re = /x/;
95
+ console.log(re);
96
+
97
+ function empty(obj) {return typeof obj=="undefined"||obj==null||obj==""?true:false}
98
+ function getRandText(text) {return Array.isArray(text) ? text[Math.floor(Math.random() * text.length + 1)-1] : text}
99
+
100
+ function showMessage(text, timeout, flag) {
101
+ if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
102
+ if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
103
+ if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));
104
+
105
+ if(flag) sessionStorage.setItem('waifu-text', text);
106
+
107
+ $('.waifu-tips').stop();
108
+ $('.waifu-tips').html(text).fadeTo(200, 1);
109
+ if (timeout === undefined) timeout = 5000;
110
+ hideMessage(timeout);
111
+ }
112
+ }
113
+
114
+ function hideMessage(timeout) {
115
+ $('.waifu-tips').stop().css('opacity',1);
116
+ if (timeout === undefined) timeout = 5000;
117
+ window.setTimeout(function() {sessionStorage.removeItem('waifu-text')}, timeout);
118
+ $('.waifu-tips').delay(timeout).fadeTo(200, 0);
119
+ }
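Together, showMessage() and hideMessage() implement a small priority scheme: a message posted with `flag` set is shown immediately and parked in sessionStorage under 'waifu-text', which blocks unflagged messages until the timeout clears the slot. A condensed Python sketch of that locking behaviour (the storage slot becomes a module variable; fade timing is omitted):

```python
import random

_locked_text = None  # stands in for sessionStorage['waifu-text']

def show_message(text, timeout=5000, flag=False):
    """Sketch of showMessage(): flagged messages pre-empt and lock the
    tip bubble; unflagged ones only display while no lock is held."""
    global _locked_text
    if flag or _locked_text is None:
        if isinstance(text, list):           # getRandText: pick one variant
            text = random.choice(text)
        if flag:
            _locked_text = text              # lock until hide_message runs
        print(f"[tips] {text} (for {timeout} ms)")

def hide_message(timeout=5000):
    global _locked_text
    _locked_text = None                      # release the lock after timeout
```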
120
+
121
+ function initModel(waifuPath, type) {
122
+ /* console welcome message */
123
+ eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
124
+
125
+ /* 判断 JQuery */
126
+ if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
127
+
128
+ /* 加载看板娘样式 */
129
+ live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
130
+ live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
131
+ live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');
132
+
133
+ $("#live2d").attr("width",live2d_settings.waifuSize[0]);
134
+ $("#live2d").attr("height",live2d_settings.waifuSize[1]);
135
+ $(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
136
+ $(".waifu-tips").height(live2d_settings.waifuTipsSize[1]);
137
+ $(".waifu-tips").css("top",live2d_settings.waifuToolTop);
138
+ $(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
139
+ $(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
140
+ $(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);
141
+
142
+ if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
143
+ else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');
144
+
145
+ window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
146
+ if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }
147
+
148
+ try {
149
+ if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
150
+ else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
151
+ else $(".waifu").css("transition", 'all .3s ease-in-out');
152
+ } catch(err) { console.log('[Error] JQuery UI is not defined.') }
153
+
154
+ live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
155
+ if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;
156
+
157
+ $('.waifu-tool .fui-home').click(function (){
158
+ //window.location = 'https://www.fghrsh.net/';
159
+ window.location = live2d_settings.homePageUrl;
160
+ });
161
+
162
+ $('.waifu-tool .fui-info-circle').click(function (){
163
+ //window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
164
+ window.open(live2d_settings.aboutPageUrl);
165
+ });
166
+
167
+ if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
168
+ $.ajax({
169
+ cache: true,
170
+ url: waifuPath == '' ? live2d_settings.tipsMessage : (waifuPath.substr(waifuPath.length-15)=='waifu-tips.json'?waifuPath:waifuPath+'waifu-tips.json'),
171
+ dataType: "json",
172
+ success: function (result){ loadTipsMessage(result); }
173
+ });
174
+ }
175
+
176
+ if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
177
+ if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
178
+ if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
179
+ if (!live2d_settings.canSwitchTextures) $('.waifu-tool .fui-user').hide();
180
+ if (!live2d_settings.canSwitchHitokoto) $('.waifu-tool .fui-chat').hide();
181
+ if (!live2d_settings.canTakeScreenshot) $('.waifu-tool .fui-photo').hide();
182
+ if (!live2d_settings.canTurnToHomePage) $('.waifu-tool .fui-home').hide();
183
+ if (!live2d_settings.canTurnToAboutPage) $('.waifu-tool .fui-info-circle').hide();
184
+
185
+ if (waifuPath === undefined) waifuPath = '';
186
+ var modelId = localStorage.getItem('modelId');
187
+ var modelTexturesId = localStorage.getItem('modelTexturesId');
188
+
189
+ if (!live2d_settings.modelStorage || modelId == null) {
190
+ var modelId = live2d_settings.modelId;
191
+ var modelTexturesId = live2d_settings.modelTexturesId;
192
+ } loadModel(modelId, modelTexturesId);
193
+ }
194
+
195
+ function loadModel(modelId, modelTexturesId=0) {
196
+ if (live2d_settings.modelStorage) {
197
+ localStorage.setItem('modelId', modelId);
198
+ localStorage.setItem('modelTexturesId', modelTexturesId);
199
+ } else {
200
+ sessionStorage.setItem('modelId', modelId);
201
+ sessionStorage.setItem('modelTexturesId', modelTexturesId);
202
+ } loadlive2d('live2d', live2d_settings.modelAPI+'get/?id='+modelId+'-'+modelTexturesId, (live2d_settings.showF12Status ? console.log('[Status]','live2d','模型',modelId+'-'+modelTexturesId,'加载完成'):null));
203
+ }
204
+
205
+ function loadTipsMessage(result) {
206
+ window.waifu_tips = result;
207
+
208
+ $.each(result.mouseover, function (index, tips){
209
+ $(document).on("mouseover", tips.selector, function (){
210
+ var text = getRandText(tips.text);
211
+ text = text.render({text: $(this).text()});
212
+ showMessage(text, 3000);
213
+ });
214
+ });
215
+ $.each(result.click, function (index, tips){
216
+ $(document).on("click", tips.selector, function (){
217
+ var text = getRandText(tips.text);
218
+ text = text.render({text: $(this).text()});
219
+ showMessage(text, 3000, true);
220
+ });
221
+ });
222
+ $.each(result.seasons, function (index, tips){
223
+ var now = new Date();
224
+ var after = tips.date.split('-')[0];
225
+ var before = tips.date.split('-')[1] || after;
226
+
227
+ if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
228
+ (after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
229
+ var text = getRandText(tips.text);
230
+ text = text.render({year: now.getFullYear()});
231
+ showMessage(text, 6000, true);
232
+ }
233
+ });
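Each seasons entry carries an "MM/DD" date or an "MM/DD-MM/DD" range, and the loop above tests the month interval and the day interval independently. The same check in Python; the independent day test is inherited from the JS and only behaves unexpectedly for ranges spanning a month boundary:

```python
from datetime import date

def season_matches(spec: str, today: date) -> bool:
    # Same comparison as the $.each(result.seasons, ...) loop above.
    after, _, before = spec.partition('-')
    before = before or after                 # a single date repeats "after"
    m1, d1 = (int(x) for x in after.split('/'))
    m2, d2 = (int(x) for x in before.split('/'))
    return m1 <= today.month <= m2 and d1 <= today.day <= d2

print(season_matches("12/20-12/31", date(2024, 12, 25)))  # True
print(season_matches("04/01", date(2024, 4, 1)))          # True
```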
234
+
235
+ if (live2d_settings.showF12OpenMsg) {
236
+ re.toString = function() {
237
+ showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
238
+ return '';
239
+ };
240
+ }
241
+
242
+ if (live2d_settings.showCopyMessage) {
243
+ $(document).on('copy', function() {
244
+ showMessage(getRandText(result.waifu.copy_message), 5000, true);
245
+ });
246
+ }
247
+
248
+ $('.waifu-tool .fui-photo').click(function(){
249
+ showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
250
+ window.Live2D.captureName = live2d_settings.screenshotCaptureName;
251
+ window.Live2D.captureFrame = true;
252
+ });
253
+
254
+ $('.waifu-tool .fui-cross').click(function(){
255
+ sessionStorage.setItem('waifu-dsiplay', 'none');
256
+ showMessage(getRandText(result.waifu.hidden_message), 1300, true);
257
+ window.setTimeout(function() {$('.waifu').hide();}, 1300);
258
+ });
259
+
260
+ window.showWelcomeMessage = function(result) {
261
+ var text;
262
+ if (window.location.href == live2d_settings.homePageUrl) {
263
+ var now = (new Date()).getHours();
264
+ if (now > 23 || now <= 5) text = getRandText(result.waifu.hour_tips['t23-5']);
265
+ else if (now > 5 && now <= 7) text = getRandText(result.waifu.hour_tips['t5-7']);
266
+ else if (now > 7 && now <= 11) text = getRandText(result.waifu.hour_tips['t7-11']);
267
+ else if (now > 11 && now <= 14) text = getRandText(result.waifu.hour_tips['t11-14']);
268
+ else if (now > 14 && now <= 17) text = getRandText(result.waifu.hour_tips['t14-17']);
269
+ else if (now > 17 && now <= 19) text = getRandText(result.waifu.hour_tips['t17-19']);
270
+ else if (now > 19 && now <= 21) text = getRandText(result.waifu.hour_tips['t19-21']);
271
+ else if (now > 21 && now <= 23) text = getRandText(result.waifu.hour_tips['t21-23']);
272
+ else text = getRandText(result.waifu.hour_tips.default);
273
+ } else {
274
+ var referrer_message = result.waifu.referrer_message;
275
+ if (document.referrer !== '') {
276
+ var referrer = document.createElement('a');
277
+ referrer.href = document.referrer;
278
+ var domain = referrer.hostname.split('.')[1];
279
+ if (window.location.hostname == referrer.hostname)
280
+ text = referrer_message.localhost[0] + document.title.split(referrer_message.localhost[2])[0] + referrer_message.localhost[1];
281
+ else if (domain == 'baidu')
282
+ text = referrer_message.baidu[0] + referrer.search.split('&wd=')[1].split('&')[0] + referrer_message.baidu[1];
283
+ else if (domain == 'so')
284
+ text = referrer_message.so[0] + referrer.search.split('&q=')[1].split('&')[0] + referrer_message.so[1];
285
+ else if (domain == 'google')
286
+ text = referrer_message.google[0] + document.title.split(referrer_message.google[2])[0] + referrer_message.google[1];
287
+ else {
288
+ $.each(result.waifu.referrer_hostname, function(i,val) {if (i==referrer.hostname) referrer.hostname = getRandText(val)});
289
+ text = referrer_message.default[0] + referrer.hostname + referrer_message.default[1];
290
+ }
291
+ } else text = referrer_message.none[0] + document.title.split(referrer_message.none[2])[0] + referrer_message.none[1];
292
+ }
293
+ showMessage(text, 6000);
294
+ }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);
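On the home page, showWelcomeMessage() greets by time of day; elsewhere it builds a referrer-specific greeting from referrer_message. The hour branches map one-to-one onto hour_tips keys in waifu-tips.json, as this reduction to a table lookup shows (illustrative sketch):

```python
def hour_tips_key(hour: int) -> str:
    """Return the hour_tips key chosen by showWelcomeMessage() above."""
    if hour > 23 or hour <= 5:
        return 't23-5'                       # late night wraps past midnight
    for upper, key in [(7, 't5-7'), (11, 't7-11'), (14, 't11-14'),
                       (17, 't14-17'), (19, 't17-19'), (21, 't19-21'),
                       (23, 't21-23')]:
        if hour <= upper:
            return key
    return 'default'                         # unreachable for 0-23 input

print(hour_tips_key(9))   # t7-11
print(hour_tips_key(2))   # t23-5
```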
295
+
296
+ var waifu_tips = result.waifu;
297
+
298
+ function loadOtherModel() {
299
+ var modelId = modelStorageGetItem('modelId');
300
+ var modelRandMode = live2d_settings.modelRandMode;
301
+
302
+ $.ajax({
303
+ cache: modelRandMode == 'switch' ? true : false,
304
+ url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
305
+ dataType: "json",
306
+ success: function(result) {
307
+ loadModel(result.model['id']);
308
+ var message = result.model['message'];
309
+ $.each(waifu_tips.model_message, function(i,val) {if (i==result.model['id']) message = getRandText(val)});
310
+ showMessage(message, 3000, true);
311
+ }
312
+ });
313
+ }
314
+
315
+ function loadRandTextures() {
316
+ var modelId = modelStorageGetItem('modelId');
317
+ var modelTexturesId = modelStorageGetItem('modelTexturesId');
318
+ var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;
319
+
320
+ $.ajax({
321
+ cache: modelTexturesRandMode == 'switch' ? true : false,
322
+ url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
323
+ dataType: "json",
324
+ success: function(result) {
325
+ if (result.textures['id'] == 1 && (modelTexturesId == 1 || modelTexturesId == 0))
326
+ showMessage(waifu_tips.load_rand_textures[0], 3000, true);
327
+ else showMessage(waifu_tips.load_rand_textures[1], 3000, true);
328
+ loadModel(modelId, result.textures['id']);
329
+ }
330
+ });
331
+ }
332
+
333
+ function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }
334
+
335
+ /* 检测用户活动状态,并在空闲时显示一言 */
336
+ if (live2d_settings.showHitokoto) {
337
+ window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
338
+ $(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
339
+ setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
340
+ }
341
+
342
+ function ifActed() {
343
+ if (!hitokotoInterval) {
344
+ hitokotoInterval = true;
345
+ hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
346
+ }
347
+ }
348
+
349
+ function elseActed() {
350
+ getActed = hitokotoInterval = false;
351
+ window.clearInterval(hitokotoTimer);
352
+ }
353
+
354
+ function showHitokotoActed() {
355
+ if ($(document)[0].visibilityState == 'visible') showHitokoto();
356
+ }
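This block is a small idle detector: any mousemove or keydown sets getActed, the one-second poller clears it, and only while the flag stays false does a 30-second ticker keep calling showHitokoto(). The same state machine sketched in Python, with the two timers collapsed into a single per-second poll and a monotonic clock:

```python
import time

acted = False                  # set True by any mouse/keyboard event handler
idle_since = time.monotonic()

def poll_every_second():
    """One tick of the 1 s interval above: reset on activity, otherwise
    fire a hitokoto once 30 s of idleness have accumulated."""
    global acted, idle_since
    now = time.monotonic()
    if acted:
        acted = False
        idle_since = now              # user moved: restart the idle window
    elif now - idle_since >= 30:
        print("showHitokoto()")       # idle long enough: show a quote
        idle_since = now              # and wait another 30 s
```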
357
+
358
+ function showHitokoto() {
359
+ switch(live2d_settings.hitokotoAPI) {
360
+ case 'lwl12.com':
361
+ $.getJSON('https://api.lwl12.com/hitokoto/v1?encode=realjson',function(result){
362
+ if (!empty(result.source)) {
363
+ var text = waifu_tips.hitokoto_api_message['lwl12.com'][0];
364
+ if (!empty(result.author)) text += waifu_tips.hitokoto_api_message['lwl12.com'][1];
365
+ text = text.render({source: result.source, creator: result.author});
366
+ window.setTimeout(function() {showMessage(text+waifu_tips.hitokoto_api_message['lwl12.com'][2], 3000, true);}, 5000);
367
+ } showMessage(result.text, 5000, true);
368
+ });break;
369
+ case 'fghrsh.net':
370
+ $.getJSON('https://api.fghrsh.net/hitokoto/rand/?encode=jsc&uid=3335',function(result){
371
+ if (!empty(result.source)) {
372
+ var text = waifu_tips.hitokoto_api_message['fghrsh.net'][0];
373
+ text = text.render({source: result.source, date: result.date});
374
+ window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
375
+ showMessage(result.hitokoto, 5000, true);
376
+ }
377
+ });break;
378
+ case 'jinrishici.com':
379
+ $.ajax({
380
+ url: 'https://v2.jinrishici.com/one.json',
381
+ xhrFields: {withCredentials: true},
382
+ success: function (result, status) {
383
+ if (!empty(result.data.origin.title)) {
384
+ var text = waifu_tips.hitokoto_api_message['jinrishici.com'][0];
385
+ text = text.render({title: result.data.origin.title, dynasty: result.data.origin.dynasty, author:result.data.origin.author});
386
+ window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
387
+ } showMessage(result.data.content, 5000, true);
388
+ }
389
+ });break;
390
+ default:
391
+ $.getJSON('https://v1.hitokoto.cn',function(result){
392
+ if (!empty(result.from)) {
393
+ var text = waifu_tips.hitokoto_api_message['hitokoto.cn'][0];
394
+ text = text.render({source: result.from, creator: result.creator});
395
+ window.setTimeout(function() {showMessage(text, 3000, true);}, 5000);
396
+ }
397
+ showMessage(result.hitokoto, 5000, true);
398
+ });
399
+ }
400
+ }
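All four hitokoto branches share one shape: fetch JSON, show the quote at once, then show an attribution line rendered from hitokoto_api_message a few seconds later. For reference, the default branch (hitokoto.cn) reduced to a Python one-off; this assumes the third-party `requests` package, and the field names `hitokoto`, `from`, and `creator` come from the JS above:

```python
import requests  # assumption: third-party package, not used by the plugin itself

def fetch_hitokoto():
    """Fetch one quote from the default branch above (hitokoto.cn)."""
    data = requests.get('https://v1.hitokoto.cn', timeout=5).json()
    quote = data['hitokoto']
    if data.get('from'):                     # attribution may be absent
        quote += " 『{}』".format(data['from'])
        if data.get('creator'):
            quote += " / {}".format(data['creator'])
    return quote

print(fetch_hitokoto())
```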
401
+
402
+ $('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
403
+ $('.waifu-tool .fui-user').click(function (){loadRandTextures()});
404
+ $('.waifu-tool .fui-chat').click(function (){showHitokoto()});
405
+ }
docs/waifu_plugin/waifu-tips.json ADDED
@@ -0,0 +1,116 @@
1
+ {
2
+ "waifu": {
3
+ "console_open_msg": ["哈哈,你打开了控制台,是想要看看我的秘密吗?"],
4
+ "copy_message": ["你都复制了些什么呀,转载要记得加上出处哦"],
5
+ "screenshot_message": ["照好了嘛,是不是很可爱呢?"],
6
+ "hidden_message": ["我们还能再见面的吧…"],
7
+ "load_rand_textures": ["我还没有其他衣服呢", "我的新衣服好看嘛"],
8
+ "hour_tips": {
9
+ "t0-5": ["快睡觉去吧,年纪轻轻小心猝死哦"],
10
+ "t5-7": ["早上好!一日之计在于晨,美好的一天就要开始了"],
11
+ "t7-11": ["上午好!工作顺利嘛,不要久坐,多起来走动走动哦!"],
12
+ "t11-14": ["中午了,工作了一个上午,现在是午餐时间!"],
13
+ "t14-17": ["午后很容易犯困呢,今天的运动目标完成了吗?"],
14
+ "t17-19": ["傍晚了!窗外夕阳的景色很美丽呢,最美不过夕阳红~"],
15
+ "t19-21": ["晚上好,今天过得怎么样?"],
16
+ "t21-23": ["已经这么晚了呀,早点休息吧,晚安~"],
17
+ "t23-24": ["你是夜猫子呀?这么晚还不睡觉,明天起的来嘛"],
18
+ "default": ["嗨~ 快来逗我玩吧!"]
19
+ },
20
+ "referrer_message": {
21
+ "localhost": ["欢迎使用<span style=\"color:rgba(245, 20, 20, 0.62);\">『ChatGPT", "』</span>", " - "],
22
+ "baidu": ["Hello! 来自 百度搜索 的朋友<br>你是搜索 <span style=\"color:rgba(245, 20, 20, 0.62);\">", "</span> 找到的我吗?"],
23
+ "so": ["Hello! 来自 360搜索 的朋友<br>你是搜索 <span style=\"color:rgba(245, 20, 20, 0.62);\">", "</span> 找到的我吗?"],
24
+ "google": ["Hello! 来自 谷歌搜索 的朋友<br>欢迎使用<span style=\"color:rgba(245, 20, 20, 0.62);\">『ChatGPT", "』</span>", " - "],
25
+ "default": ["Hello! 来自 <span style=\"color:rgba(245, 20, 20, 0.62);\">", "</span> 的朋友"],
26
+ "none": ["欢迎使用<span style=\"color:rgba(245, 20, 20, 0.62);\">『ChatGPT", "』</span>", " - "]
27
+ },
28
+ "referrer_hostname": {
29
+ "example.com": ["示例网站"],
30
+ "www.fghrsh.net": ["FGHRSH 的博客"]
31
+ },
32
+ "model_message": {
33
+ "1": ["来自 Potion Maker 的 Pio 酱 ~"],
34
+ "2": ["来自 Potion Maker 的 Tia 酱 ~"]
35
+ },
36
+ "hitokoto_api_message": {
37
+ "lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
38
+ "fghrsh.net": ["这句一言出处是 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">FGHRSH</span> 在 {date} 收藏的!"],
39
+ "jinrishici.com": ["这句诗词出自 <span style=\"color:#0099cc;\">《{title}》</span>,是 {dynasty}诗人 {author} 创作的!"],
40
+ "hitokoto.cn": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>,是 <span style=\"color:#0099cc;\">{creator}</span> 在 hitokoto.cn 投稿的。"]
41
+ }
42
+ },
43
+ "mouseover": [
44
+ { "selector": ".container a[href^='http']", "text": ["要看看 <span style=\"color:#0099cc;\">{text}</span> 么?"] },
45
+ { "selector": ".fui-home", "text": ["点击前往首页,想回到上一页可以使用浏览器的后退功能哦"] },
46
+ { "selector": ".fui-chat", "text": ["一言一语,一颦一笑。一字一句,一颗赛艇。"] },
47
+ { "selector": ".fui-eye", "text": ["嗯··· 要切换 看板娘 吗?"] },
48
+ { "selector": ".fui-user", "text": ["喜欢换装 Play 吗?"] },
49
+ { "selector": ".fui-photo", "text": ["要拍张纪念照片吗?"] },
50
+ { "selector": ".fui-info-circle", "text": ["这里有关于我的信息呢"] },
51
+ { "selector": ".fui-cross", "text": ["你不喜欢我了吗..."] },
52
+ { "selector": "#tor_show", "text": ["翻页比较麻烦吗,点击可以显示这篇文章的目录呢"] },
53
+ { "selector": "#comment_go", "text": ["想要去评论些什么吗?"] },
54
+ { "selector": "#night_mode", "text": ["深夜时要爱护眼睛呀"] },
55
+ { "selector": "#qrcode", "text": ["手机扫一下就能继续看,很方便呢"] },
56
+ { "selector": ".comment_reply", "text": ["要吐槽些什么呢"] },
57
+ { "selector": "#back-to-top", "text": ["回到开始的地方吧"] },
58
+ { "selector": "#author", "text": ["该怎么称呼你呢"] },
59
+ { "selector": "#mail", "text": ["留下你的邮箱,不然就是无头像人士了"] },
60
+ { "selector": "#url", "text": ["你的家在哪里呢,好让我去参观参观"] },
61
+ { "selector": "#textarea", "text": ["认真填写哦,垃圾评论是禁止事项"] },
62
+ { "selector": ".OwO-logo", "text": ["要插入一个表情吗"] },
63
+ { "selector": "#csubmit", "text": ["要[提交]^(Commit)了吗,首次评论需要审核,请耐心等待~"] },
64
+ { "selector": ".ImageBox", "text": ["点击图片可以放大呢"] },
65
+ { "selector": "input[name=s]", "text": ["找不到想看的内容?搜索看看吧"] },
66
+ { "selector": ".previous", "text": ["去上一页看看吧"] },
67
+ { "selector": ".next", "text": ["去下一页看看吧"] },
68
+ { "selector": ".dropdown-toggle", "text": ["这里是菜单"] },
69
+ { "selector": "c-player a.play-icon", "text": ["想要听点音乐吗"] },
70
+ { "selector": "c-player div.time", "text": ["在这里可以调整<span style=\"color:#0099cc;\">播放进度</span>呢"] },
71
+ { "selector": "c-player div.volume", "text": ["在这里可以调整<span style=\"color:#0099cc;\">音量</span>呢"] },
72
+ { "selector": "c-player div.list-button", "text": ["<span style=\"color:#0099cc;\">播放列表</span>里都有什么呢"] },
73
+ { "selector": "c-player div.lyric-button", "text": ["有<span style=\"color:#0099cc;\">歌词</span>的话就能跟着一起唱呢"] },
74
+ { "selector": ".waifu #live2d", "text": [
75
+ "别玩了,快去学习!",
76
+ "偶尔放松下眼睛吧。",
77
+ "看什么看(*^▽^*)",
78
+ "焦虑时,吃顿大餐心情就好啦^_^",
79
+ "你这个年纪,怎么睡得着觉的你^_^",
80
+ "修改ADD_WAIFU=False,我就不再打扰你了~",
81
+ "经常去github看看我们的更新吧,也许有好玩的新功能呢。",
82
+ "试试本地大模型吧,有的也很强大的哦。",
83
+ "很多强大的函数插件隐藏在下拉菜单中呢。",
84
+ "红色的插件,使用之前需要把文件上传进去哦。",
85
+ "想添加功能按钮吗?读读readme很容易就学会啦。",
86
+ "敏感或机密的信息,不可以问chatGPT的哦!",
87
+ "chatGPT究竟是划时代的创新,还是扼杀创造力的毒药呢?"
88
+ ] }
89
+ ],
90
+ "click": [
91
+ {
92
+ "selector": ".waifu #live2d",
93
+ "text": [
94
+ "是…是不小心碰到了吧",
95
+ "萝莉控是什么呀",
96
+ "你看到我的小熊了吗",
97
+ "再摸的话我可要报警了!⌇●﹏●⌇",
98
+ "110吗,这里有个变态一直在摸我(ó﹏ò。)"
99
+ ]
100
+ }
101
+ ],
102
+ "seasons": [
103
+ { "date": "01/01", "text": ["<span style=\"color:#0099cc;\">元旦</span>了呢,新的一年又开始了,今年是{year}年~"] },
104
+ { "date": "02/14", "text": ["又是一年<span style=\"color:#0099cc;\">情人节</span>,{year}年找到对象了嘛~"] },
105
+ { "date": "03/08", "text": ["今天是<span style=\"color:#0099cc;\">妇女节</span>!"] },
106
+ { "date": "03/12", "text": ["今天是<span style=\"color:#0099cc;\">植树节</span>,要保护环境呀"] },
107
+ { "date": "04/01", "text": ["悄悄告诉你一个秘密~<span style=\"background-color:#34495e;\">今天是愚人节,不要被骗了哦~</span>"] },
108
+ { "date": "05/01", "text": ["今天是<span style=\"color:#0099cc;\">五一劳动节</span>,计划好假期去哪里了吗~"] },
109
+ { "date": "06/01", "text": ["<span style=\"color:#0099cc;\">儿童节</span>了呢,快活的时光总是短暂,要是永远长不大该多好啊…"] },
110
+ { "date": "09/03", "text": ["<span style=\"color:#0099cc;\">中国人民抗日战争胜利纪念日</span>,铭记历史、缅怀先烈、珍爱和平、开创未来。"] },
111
+ { "date": "09/10", "text": ["<span style=\"color:#0099cc;\">教师节</span>,在学校要给老师问声好呀~"] },
112
+ { "date": "10/01", "text": ["<span style=\"color:#0099cc;\">国庆节</span>,新中国已经成立69年了呢"] },
113
+ { "date": "11/05-11/12", "text": ["今年的<span style=\"color:#0099cc;\">双十一</span>是和谁一起过的呢~"] },
114
+ { "date": "12/20-12/31", "text": ["这几天是<span style=\"color:#0099cc;\">圣诞节</span>,主人肯定又去剁手买买买了~"] }
115
+ ]
116
+ }
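waifu-tips.json is the single data source for everything waifu-tips.js displays: `waifu` holds fixed messages and per-API templates, while `mouseover`, `click`, and `seasons` are lists of selector/text (or date/text) rules. A quick structural check for hand-edited copies of the file (a sketch, not part of the repo):

```python
import json

with open('docs/waifu_plugin/waifu-tips.json', encoding='utf-8') as f:
    tips = json.load(f)

# Top-level sections consumed by loadTipsMessage() in waifu-tips.js
assert {'waifu', 'mouseover', 'click', 'seasons'} <= set(tips)

for rule in tips['mouseover'] + tips['click']:
    assert rule['selector'] and rule['text']      # every rule needs both

for season in tips['seasons']:
    assert season['date'] and season['text']      # "MM/DD" or "MM/DD-MM/DD"

print(len(tips['mouseover']), 'hover rules,', len(tips['seasons']), 'seasonal tips')
```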
docs/waifu_plugin/waifu.css ADDED
@@ -0,0 +1,290 @@
1
+ .waifu {
2
+ position: fixed;
3
+ bottom: 0;
4
+ z-index: 1;
5
+ font-size: 0;
6
+ -webkit-transform: translateY(3px);
7
+ transform: translateY(3px);
8
+ }
9
+ .waifu:hover {
10
+ -webkit-transform: translateY(0);
11
+ transform: translateY(0);
12
+ }
13
+ .waifu-tips {
14
+ opacity: 0;
15
+ margin: -20px 20px;
16
+ padding: 5px 10px;
17
+ border: 1px solid rgba(224, 186, 140, 0.62);
18
+ border-radius: 12px;
19
+ background-color: rgba(236, 217, 188, 0.5);
20
+ box-shadow: 0 3px 15px 2px rgba(191, 158, 118, 0.2);
21
+ text-overflow: ellipsis;
22
+ overflow: hidden;
23
+ position: absolute;
24
+ animation-delay: 5s;
25
+ animation-duration: 50s;
26
+ animation-iteration-count: infinite;
27
+ animation-name: shake;
28
+ animation-timing-function: ease-in-out;
29
+ }
30
+ .waifu-tool {
31
+ display: none;
32
+ color: #aaa;
33
+ top: 50px;
34
+ right: 10px;
35
+ position: absolute;
36
+ }
37
+ .waifu:hover .waifu-tool {
38
+ display: block;
39
+ }
40
+ .waifu-tool span {
41
+ display: block;
42
+ cursor: pointer;
43
+ color: #5b6c7d;
44
+ transition: 0.2s;
45
+ }
46
+ .waifu-tool span:hover {
47
+ color: #34495e;
48
+ }
49
+ .waifu #live2d{
50
+ position: relative;
51
+ }
52
+
53
+ @keyframes shake {
54
+ 2% {
55
+ transform: translate(0.5px, -1.5px) rotate(-0.5deg);
56
+ }
57
+
58
+ 4% {
59
+ transform: translate(0.5px, 1.5px) rotate(1.5deg);
60
+ }
61
+
62
+ 6% {
63
+ transform: translate(1.5px, 1.5px) rotate(1.5deg);
64
+ }
65
+
66
+ 8% {
67
+ transform: translate(2.5px, 1.5px) rotate(0.5deg);
68
+ }
69
+
70
+ 10% {
71
+ transform: translate(0.5px, 2.5px) rotate(0.5deg);
72
+ }
73
+
74
+ 12% {
75
+ transform: translate(1.5px, 1.5px) rotate(0.5deg);
76
+ }
77
+
78
+ 14% {
79
+ transform: translate(0.5px, 0.5px) rotate(0.5deg);
80
+ }
81
+
82
+ 16% {
83
+ transform: translate(-1.5px, -0.5px) rotate(1.5deg);
84
+ }
85
+
86
+ 18% {
87
+ transform: translate(0.5px, 0.5px) rotate(1.5deg);
88
+ }
89
+
90
+ 20% {
91
+ transform: translate(2.5px, 2.5px) rotate(1.5deg);
92
+ }
93
+
94
+ 22% {
95
+ transform: translate(0.5px, -1.5px) rotate(1.5deg);
96
+ }
97
+
98
+ 24% {
99
+ transform: translate(-1.5px, 1.5px) rotate(-0.5deg);
100
+ }
101
+
102
+ 26% {
103
+ transform: translate(1.5px, 0.5px) rotate(1.5deg);
104
+ }
105
+
106
+ 28% {
107
+ transform: translate(-0.5px, -0.5px) rotate(-0.5deg);
108
+ }
109
+
110
+ 30% {
111
+ transform: translate(1.5px, -0.5px) rotate(-0.5deg);
112
+ }
113
+
114
+ 32% {
115
+ transform: translate(2.5px, -1.5px) rotate(1.5deg);
116
+ }
117
+
118
+ 34% {
119
+ transform: translate(2.5px, 2.5px) rotate(-0.5deg);
120
+ }
121
+
122
+ 36% {
123
+ transform: translate(0.5px, -1.5px) rotate(0.5deg);
124
+ }
125
+
126
+ 38% {
127
+ transform: translate(2.5px, -0.5px) rotate(-0.5deg);
128
+ }
129
+
130
+ 40% {
131
+ transform: translate(-0.5px, 2.5px) rotate(0.5deg);
132
+ }
133
+
134
+ 42% {
135
+ transform: translate(-1.5px, 2.5px) rotate(0.5deg);
136
+ }
137
+
138
+ 44% {
139
+ transform: translate(-1.5px, 1.5px) rotate(0.5deg);
140
+ }
141
+
142
+ 46% {
143
+ transform: translate(1.5px, -0.5px) rotate(-0.5deg);
144
+ }
145
+
146
+ 48% {
147
+ transform: translate(2.5px, -0.5px) rotate(0.5deg);
148
+ }
149
+
150
+ 50% {
151
+ transform: translate(-1.5px, 1.5px) rotate(0.5deg);
152
+ }
153
+
154
+ 52% {
155
+ transform: translate(-0.5px, 1.5px) rotate(0.5deg);
156
+ }
157
+
158
+ 54% {
159
+ transform: translate(-1.5px, 1.5px) rotate(0.5deg);
160
+ }
161
+
162
+ 56% {
163
+ transform: translate(0.5px, 2.5px) rotate(1.5deg);
164
+ }
165
+
166
+ 58% {
167
+ transform: translate(2.5px, 2.5px) rotate(0.5deg);
168
+ }
169
+
170
+ 60% {
171
+ transform: translate(2.5px, -1.5px) rotate(1.5deg);
172
+ }
173
+
174
+ 62% {
175
+ transform: translate(-1.5px, 0.5px) rotate(1.5deg);
176
+ }
177
+
178
+ 64% {
179
+ transform: translate(-1.5px, 1.5px) rotate(1.5deg);
180
+ }
181
+
182
+ 66% {
183
+ transform: translate(0.5px, 2.5px) rotate(1.5deg);
184
+ }
185
+
186
+ 68% {
187
+ transform: translate(2.5px, -1.5px) rotate(1.5deg);
188
+ }
189
+
190
+ 70% {
191
+ transform: translate(2.5px, 2.5px) rotate(0.5deg);
192
+ }
193
+
194
+ 72% {
195
+ transform: translate(-0.5px, -1.5px) rotate(1.5deg);
196
+ }
197
+
198
+ 74% {
199
+ transform: translate(-1.5px, 2.5px) rotate(1.5deg);
200
+ }
201
+
202
+ 76% {
203
+ transform: translate(-1.5px, 2.5px) rotate(1.5deg);
204
+ }
205
+
206
+ 78% {
207
+ transform: translate(-1.5px, 2.5px) rotate(0.5deg);
208
+ }
209
+
210
+ 80% {
211
+ transform: translate(-1.5px, 0.5px) rotate(-0.5deg);
212
+ }
213
+
214
+ 82% {
215
+ transform: translate(-1.5px, 0.5px) rotate(-0.5deg);
216
+ }
217
+
218
+ 84% {
219
+ transform: translate(-0.5px, 0.5px) rotate(1.5deg);
220
+ }
221
+
222
+ 86% {
223
+ transform: translate(2.5px, 1.5px) rotate(0.5deg);
224
+ }
225
+
226
+ 88% {
227
+ transform: translate(-1.5px, 0.5px) rotate(1.5deg);
228
+ }
229
+
230
+ 90% {
231
+ transform: translate(-1.5px, -0.5px) rotate(-0.5deg);
232
+ }
233
+
234
+ 92% {
235
+ transform: translate(-1.5px, -1.5px) rotate(1.5deg);
236
+ }
237
+
238
+ 94% {
239
+ transform: translate(0.5px, 0.5px) rotate(-0.5deg);
240
+ }
241
+
242
+ 96% {
243
+ transform: translate(2.5px, -0.5px) rotate(-0.5deg);
244
+ }
245
+
246
+ 98% {
247
+ transform: translate(-1.5px, -1.5px) rotate(-0.5deg);
248
+ }
249
+
250
+ 0%, 100% {
251
+ transform: translate(0, 0) rotate(0);
252
+ }
253
+ }
254
+ @font-face {
255
+ font-family: 'Flat-UI-Icons';
256
+ src: url('flat-ui-icons-regular.eot');
257
+ src: url('flat-ui-icons-regular.eot?#iefix') format('embedded-opentype'), url('flat-ui-icons-regular.woff') format('woff'), url('flat-ui-icons-regular.ttf') format('truetype'), url('flat-ui-icons-regular.svg#flat-ui-icons-regular') format('svg');
258
+ }
259
+ [class^="fui-"],
260
+ [class*="fui-"] {
261
+ font-family: 'Flat-UI-Icons';
262
+ speak: none;
263
+ font-style: normal;
264
+ font-weight: normal;
265
+ font-variant: normal;
266
+ text-transform: none;
267
+ -webkit-font-smoothing: antialiased;
268
+ -moz-osx-font-smoothing: grayscale;
269
+ }
270
+ .fui-cross:before {
271
+ content: "\e609";
272
+ }
273
+ .fui-info-circle:before {
274
+ content: "\e60f";
275
+ }
276
+ .fui-photo:before {
277
+ content: "\e62a";
278
+ }
279
+ .fui-eye:before {
280
+ content: "\e62c";
281
+ }
282
+ .fui-chat:before {
283
+ content: "\e62d";
284
+ }
285
+ .fui-home:before {
286
+ content: "\e62e";
287
+ }
288
+ .fui-user:before {
289
+ content: "\e631";
290
+ }
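The shake animation above is roughly fifty hand-written keyframes of sub-pixel translate/rotate jitter, looped over 50 seconds to keep the tip bubble subtly alive. Regenerating it with different amplitudes is mechanical; a hypothetical generator (not shipped with the plugin) could look like this:

```python
import random

def shake_keyframes(step=2, seed=42):
    """Emit CSS keyframes like the shake block above: every 2% a small
    random translate/rotate, returning to rest at 0%/100%."""
    rng = random.Random(seed)
    offsets = [-1.5, -0.5, 0.5, 1.5, 2.5]        # values seen in the CSS above
    lines = []
    for pct in range(step, 100, step):
        x, y = rng.choice(offsets), rng.choice(offsets)
        rot = rng.choice([-0.5, 0.5, 1.5])
        lines.append(f"  {pct}% {{ transform: translate({x}px, {y}px) rotate({rot}deg); }}")
    lines.append("  0%, 100% { transform: translate(0, 0) rotate(0); }")
    return "@keyframes shake {\n" + "\n".join(lines) + "\n}"

print(shake_keyframes())
```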
main.py CHANGED
@@ -59,7 +59,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
59
  chatbot.style(height=CHATBOT_HEIGHT)
60
  history = gr.State([])
61
  with gr_L2(scale=1):
62
- with gr.Accordion("用户输入", open=True) as area_input_primary:
63
  with gr.Row():
64
  txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
65
  with gr.Row():
@@ -69,10 +69,11 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
69
  stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
70
  clearBtn = gr.Button("清除", variant="secondary", visible=True); clearBtn.style(size="sm")
71
  with gr.Row():
72
- status = gr.Markdown(f"Tips: 按Enter提交; 按Shift+Enter换行。")
73
  with gr.Accordion("基础功能区", open=False) as area_basic_fn:
74
  with gr.Row():
75
  for k in functional:
 
76
  variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
77
  functional[k]["Button"] = gr.Button(k, variant=variant)
78
  with gr.Accordion("函数插件区", open=False) as area_crazy_fn:
@@ -114,7 +115,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
114
  with gr.Row():
115
  resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
116
  stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
117
- clearBtn2 = gr.Button("清除", variant="secondary", visible=True); clearBtn2.style(size="sm")
118
  # 功能区显示开关与功能区的互动
119
  def fn_area_visibility(a):
120
  ret = {}
@@ -143,6 +144,7 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
143
  clearBtn2.click(lambda: ("",""), None, [txt, txt2])
144
  # 基础功能区的回调函数注册
145
  for k in functional:
 
146
  click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
147
  cancel_handles.append(click_handle)
148
  # 文件上传区,接收文件后与chatbot的互动
@@ -172,13 +174,10 @@ with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=
172
  yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
173
  click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
174
  click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
175
- # def expand_file_area(file_upload, area_file_up):
176
- # if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
177
- # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
178
  cancel_handles.append(click_handle)
179
  # 终止按钮的回调函数注册
180
  stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
181
  stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
182
 
183
- app.launch(auth=(os.environ.get("USER"), os.environ.get("PASSWD")))
184
 
 
59
  chatbot.style(height=CHATBOT_HEIGHT)
60
  history = gr.State([])
61
  with gr_L2(scale=1):
62
+ with gr.Accordion("输入区", open=True) as area_input_primary:
63
  with gr.Row():
64
  txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
65
  with gr.Row():
 
69
  stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
70
  clearBtn = gr.Button("清除", variant="secondary", visible=True); clearBtn.style(size="sm")
71
  with gr.Row():
72
+ status = gr.Markdown(f"Tips: 按Enter提交, 按Shift+Enter换行。")
73
  with gr.Accordion("基础功能区", open=False) as area_basic_fn:
74
  with gr.Row():
75
  for k in functional:
76
+ if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
77
  variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
78
  functional[k]["Button"] = gr.Button(k, variant=variant)
79
  with gr.Accordion("函数插件区", open=False) as area_crazy_fn:
 
115
  with gr.Row():
116
  resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
117
  stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
118
+ clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
119
  # 功能区显示开关与功能区的互动
120
  def fn_area_visibility(a):
121
  ret = {}
 
144
  clearBtn2.click(lambda: ("",""), None, [txt, txt2])
145
  # 基础功能区的回调函数注册
146
  for k in functional:
147
+ if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
148
  click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
149
  cancel_handles.append(click_handle)
150
  # 文件上传区,接收文件后与chatbot的互动
 
174
  yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
175
  click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
176
  click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
177
  cancel_handles.append(click_handle)
178
  # 终止按钮的回调函数注册
179
  stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
180
  stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
181
 
182
+ app.launch()
183
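The new `Visible` flag checked in both loops above lets an entry register in `core_functional.py` without rendering a button. A minimal sketch of such an entry (the key name and prompt text below are illustrative, not from this commit):

```python
# Hypothetical core_functional.py entry; only "Visible" is the new key,
# the rest follows the schema already read by the loops in main.py.
"TranslateToEnglish": {
    "Prefix": "Translate the following text to English:\n\n",
    "Suffix": "",
    "Color": "secondary",  # consumed as the button variant
    "Visible": False,      # both registration loops above skip this entry
}
```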
 
multi_language.py ADDED
@@ -0,0 +1,499 @@
1
+ """
2
+ Translate this project to other languages
3
+ Usage:
4
+ 1. modify LANG
5
+ LANG = "English"
6
+
7
+ 2. modify TransPrompt
8
+ TransPrompt = f"Replace each json value `#` with translated results in English, e.g., \"原始文本\":\"TranslatedText\". Keep Json format. Do not answer #."
9
+
10
+ 3. Run `python multi_language.py`.
11
+ Note: You need to run it multiple times to increase translation coverage because GPT makes mistakes sometimes.
12
+
13
+ 4. Find the translated program in `multi-language\English\*`
14
+
15
+ """
16
+
17
+ import os
18
+ import json
19
+ import functools
20
+ import re
21
+ import pickle
22
+ import time
23
+
24
+ CACHE_FOLDER = "gpt_log"
25
+ blacklist = ['multi-language', 'gpt_log', '.git', 'private_upload', 'multi_language.py']
26
+
27
+ # LANG = "TraditionalChinese"
28
+ # TransPrompt = f"Replace each json value `#` with translated results in Traditional Chinese, e.g., \"原始文本\":\"翻譯後文字\". Keep Json format. Do not answer #."
29
+
30
+ # LANG = "Japanese"
31
+ # TransPrompt = f"Replace each json value `#` with translated results in Japanese, e.g., \"原始文本\":\"テキストの翻訳\". Keep Json format. Do not answer #."
32
+
33
+ LANG = "English"
34
+ TransPrompt = f"Replace each json value `#` with translated results in English, e.g., \"原始文本\":\"TranslatedText\". Keep Json format. Do not answer #."
35
+
36
+
37
+ if not os.path.exists(CACHE_FOLDER):
38
+ os.makedirs(CACHE_FOLDER)
39
+
40
+
41
+ def lru_file_cache(maxsize=128, ttl=None, filename=None):
42
+ """
43
+ Decorator that caches a function's return value after being called with given arguments.
44
+ It uses a Least Recently Used (LRU) cache strategy to limit the size of the cache.
45
+ maxsize: Maximum size of the cache. Defaults to 128.
46
+ ttl: Time-to-Live of the cache. If a value hasn't been accessed for `ttl` seconds, it will be evicted from the cache.
47
+ filename: Name of the file to store the cache in. If not supplied, the function name + ".cache" will be used.
48
+ """
49
+ cache_path = os.path.join(CACHE_FOLDER, f"{filename}.cache") if filename is not None else None
50
+
51
+ def decorator_function(func):
52
+ cache = {}
53
+ _cache_info = {
54
+ "hits": 0,
55
+ "misses": 0,
56
+ "maxsize": maxsize,
57
+ "currsize": 0,
58
+ "ttl": ttl,
59
+ "filename": cache_path,
60
+ }
61
+
62
+ @functools.wraps(func)
63
+ def wrapper_function(*args, **kwargs):
64
+ key = str((args, frozenset(kwargs)))
65
+ if key in cache:
66
+ if _cache_info["ttl"] is None or (cache[key][1] + _cache_info["ttl"]) >= time.time():
67
+ _cache_info["hits"] += 1
68
+ print(f'Warning, reading cache, last read {(time.time()-cache[key][1])//60} minutes ago'); time.sleep(2)
69
+ cache[key][1] = time.time()
70
+ return cache[key][0]
71
+ else:
72
+ del cache[key]
73
+
74
+ result = func(*args, **kwargs)
75
+ cache[key] = [result, time.time()]
76
+ _cache_info["misses"] += 1
77
+ _cache_info["currsize"] += 1
78
+
79
+ if _cache_info["currsize"] > _cache_info["maxsize"]:
80
+ oldest_key = None
81
+ for k in cache:
82
+ if oldest_key is None:
83
+ oldest_key = k
84
+ elif cache[k][1] < cache[oldest_key][1]:
85
+ oldest_key = k
86
+ del cache[oldest_key]
87
+ _cache_info["currsize"] -= 1
88
+
89
+ if cache_path is not None:
90
+ with open(cache_path, "wb") as f:
91
+ pickle.dump(cache, f)
92
+
93
+ return result
94
+
95
+ def cache_info():
96
+ return _cache_info
97
+
98
+ wrapper_function.cache_info = cache_info
99
+
100
+ if cache_path is not None and os.path.exists(cache_path):
101
+ with open(cache_path, "rb") as f:
102
+ cache = pickle.load(f)
103
+ _cache_info["currsize"] = len(cache)
104
+
105
+ return wrapper_function
106
+
107
+ return decorator_function
108
+
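A quick usage sketch of the decorator above (the decorated function is hypothetical; the decorator arguments and `cache_info()` accessor are the ones defined in the code):

```python
@lru_file_cache(maxsize=32, ttl=3600, filename="demo")
def slow_lookup(word):
    time.sleep(1)               # stands in for an expensive call
    return word.upper()

print(slow_lookup("hello"))     # miss: computed, then pickled to gpt_log/demo.cache
print(slow_lookup("hello"))     # hit: served from the cache (with the warning above)
print(slow_lookup.cache_info()) # {'hits': 1, 'misses': 1, ...}
```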
109
+ def contains_chinese(string):
110
+ """
111
+ Returns True if the given string contains Chinese characters, False otherwise.
112
+ """
113
+ chinese_regex = re.compile(u'[\u4e00-\u9fff]+')
114
+ return chinese_regex.search(string) is not None
115
+
116
+ def split_list(lst, n_each_req):
117
+ """
118
+ Split a list into smaller lists, each with a maximum number of elements.
119
+ :param lst: the list to split
120
+ :param n_each_req: the maximum number of elements in each sub-list
121
+ :return: a list of sub-lists
122
+ """
123
+ result = []
124
+ for i in range(0, len(lst), n_each_req):
125
+ result.append(lst[i:i + n_each_req])
126
+ return result
127
+
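For example, batching five items two at a time:

```python
split_list([1, 2, 3, 4, 5], n_each_req=2)   # -> [[1, 2], [3, 4], [5]]
```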
128
+ def map_to_json(map, language):
129
+ dict_ = read_map_from_json(language)
130
+ dict_.update(map)
131
+ with open(f'docs/translate_{language.lower()}.json', 'w', encoding='utf8') as f:
132
+ json.dump(dict_, f, indent=4, ensure_ascii=False)
133
+
134
+ def read_map_from_json(language):
135
+ if os.path.exists(f'docs/translate_{language.lower()}.json'):
136
+ with open(f'docs/translate_{language.lower()}.json', 'r', encoding='utf8') as f:
137
+ res = json.load(f)
138
+ res = {k:v for k, v in res.items() if v is not None and contains_chinese(k)}
139
+ return res
140
+ return {}
141
+
142
+ def advanced_split(splitted_string, spliter, include_spliter=False):
143
+ splitted_string_tmp = []
144
+ for string_ in splitted_string:
145
+ if spliter in string_:
146
+ splitted = string_.split(spliter)
147
+ for i, s in enumerate(splitted):
148
+ if include_spliter:
149
+ if i != len(splitted)-1:
150
+ splitted[i] += spliter
151
+ splitted[i] = splitted[i].strip()
152
+ for i in reversed(range(len(splitted))):
153
+ if not contains_chinese(splitted[i]):
154
+ splitted.pop(i)
155
+ splitted_string_tmp.extend(splitted)
156
+ else:
157
+ splitted_string_tmp.append(string_)
158
+ splitted_string = splitted_string_tmp
159
+ return splitted_string_tmp
160
+
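In miniature (illustrative input): the string is split on the separator, and only fragments that still contain Chinese survive.

```python
advanced_split(["foo,你好,bar"], spliter=",")   # -> ['你好']
```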
161
+ cached_translation = {}
162
+ cached_translation = read_map_from_json(language=LANG)
163
+
164
+ def trans(word_to_translate, language, special=False):
165
+ if len(word_to_translate) == 0: return {}
166
+ from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
167
+ from toolbox import get_conf, ChatBotWithCookies
168
+ proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
169
+ get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
170
+ llm_kwargs = {
171
+ 'api_key': API_KEY,
172
+ 'llm_model': LLM_MODEL,
173
+ 'top_p':1.0,
174
+ 'max_length': None,
175
+ 'temperature':0.4,
176
+ }
177
+ import random
178
+ N_EACH_REQ = random.randint(16, 32)
179
+ word_to_translate_split = split_list(word_to_translate, N_EACH_REQ)
180
+ inputs_array = [str(s) for s in word_to_translate_split]
181
+ inputs_show_user_array = inputs_array
182
+ history_array = [[] for _ in inputs_array]
183
+ if special: # to English using CamelCase Naming Convention
184
+ sys_prompt_array = [f"Translate following names to English with CamelCase naming convention. Keep original format" for _ in inputs_array]
185
+ else:
186
+ sys_prompt_array = [f"Translate following sentences to {LANG}. E.g., You should translate sentences to the following format ['translation of sentence 1', 'translation of sentence 2']. Do NOT answer with Chinese!" for _ in inputs_array]
187
+ chatbot = ChatBotWithCookies(llm_kwargs)
188
+ gpt_say_generator = request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
189
+ inputs_array,
190
+ inputs_show_user_array,
191
+ llm_kwargs,
192
+ chatbot,
193
+ history_array,
194
+ sys_prompt_array,
195
+ )
196
+ while True:
197
+ try:
198
+ gpt_say = next(gpt_say_generator)
199
+ print(gpt_say[1][0][1])
200
+ except StopIteration as e:
201
+ result = e.value
202
+ break
203
+ translated_result = {}
204
+ for i, r in enumerate(result):
205
+ if i%2 == 1:
206
+ try:
207
+ res_before_trans = eval(result[i-1])
208
+ res_after_trans = eval(result[i])
209
+ if len(res_before_trans) != len(res_after_trans):
210
+ raise RuntimeError
211
+ for a,b in zip(res_before_trans, res_after_trans):
212
+ translated_result[a] = b
213
+ except:
214
+ # try:
215
+ # res_before_trans = word_to_translate_split[(i-1)//2]
216
+ # res_after_trans = [s for s in result[i].split("', '")]
217
+ # for a,b in zip(res_before_trans, res_after_trans):
218
+ # translated_result[a] = b
219
+ # except:
220
+ print('GPT输出格式错误,稍后可能需要再试一次')
221
+ res_before_trans = eval(result[i-1])
222
+ for a in res_before_trans:
223
+ translated_result[a] = None
224
+ return translated_result
225
+
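The `while True` / `next()` loop above is the standard way to drain a generator while also capturing its *return value*, which Python delivers as `StopIteration.value`. A standalone sketch of the pattern:

```python
def gen():
    yield "progress 1"
    yield "progress 2"
    return "final result"        # surfaces as StopIteration.value

g = gen()
while True:
    try:
        print(next(g))           # stream intermediate updates
    except StopIteration as e:
        result = e.value         # -> "final result"
        break
```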
226
+
227
+ def trans_json(word_to_translate, language, special=False):
228
+ if len(word_to_translate) == 0: return {}
229
+ from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
230
+ from toolbox import get_conf, ChatBotWithCookies
231
+ proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
232
+ get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
233
+ llm_kwargs = {
234
+ 'api_key': API_KEY,
235
+ 'llm_model': LLM_MODEL,
236
+ 'top_p':1.0,
237
+ 'max_length': None,
238
+ 'temperature':0.1,
239
+ }
240
+ import random
241
+ N_EACH_REQ = random.randint(16, 32)
242
+ random.shuffle(word_to_translate)
243
+ word_to_translate_split = split_list(word_to_translate, N_EACH_REQ)
244
+ inputs_array = [{k:"#" for k in s} for s in word_to_translate_split]
245
+ inputs_array = [ json.dumps(i, ensure_ascii=False) for i in inputs_array]
246
+
247
+ inputs_show_user_array = inputs_array
248
+ history_array = [[] for _ in inputs_array]
249
+ sys_prompt_array = [TransPrompt for _ in inputs_array]
250
+ chatbot = ChatBotWithCookies(llm_kwargs)
251
+ gpt_say_generator = request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
252
+ inputs_array,
253
+ inputs_show_user_array,
254
+ llm_kwargs,
255
+ chatbot,
256
+ history_array,
257
+ sys_prompt_array,
258
+ )
259
+ while True:
260
+ try:
261
+ gpt_say = next(gpt_say_generator)
262
+ print(gpt_say[1][0][1])
263
+ except StopIteration as e:
264
+ result = e.value
265
+ break
266
+ translated_result = {}
267
+ for i, r in enumerate(result):
268
+ if i%2 == 1:
269
+ try:
270
+ translated_result.update(json.loads(result[i]))
271
+ except:
272
+ print(result[i])
273
+ print(result)
274
+ return translated_result
275
+
276
+
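Concretely, each request is a JSON object whose values are all `#`, and the model is expected to echo the same keys with translations filled in; an illustrative round trip (the translated strings are made up):

```python
# sent to the model (built from word_to_translate_split):
#   {"保存当前的对话": "#", "函数插件区": "#"}
# reply parsed by json.loads above:
#   {"保存当前的对话": "Save the current conversation",
#    "函数插件区": "Function plugin area"}
```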
277
+ def step_1_core_key_translate():
278
+ def extract_chinese_characters(file_path):
279
+ syntax = []
280
+ with open(file_path, 'r', encoding='utf-8') as f:
281
+ content = f.read()
282
+ import ast
283
+ root = ast.parse(content)
284
+ for node in ast.walk(root):
285
+ if isinstance(node, ast.Name):
286
+ if contains_chinese(node.id): syntax.append(node.id)
287
+ if isinstance(node, ast.Import):
288
+ for n in node.names:
289
+ if contains_chinese(n.name): syntax.append(n.name)
290
+ elif isinstance(node, ast.ImportFrom):
291
+ for n in node.names:
292
+ if contains_chinese(n.name): syntax.append(n.name)
293
+ for k in node.module.split('.'):
294
+ if contains_chinese(k): syntax.append(k)
295
+ return syntax
296
+
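Note that the walker collects identifiers only, never string literals: `ast.Name` nodes plus names introduced by import statements. For instance (hypothetical source file):

```python
# source line:  from crazy_functions.批量翻译 import 翻译函数
# result:       extract_chinese_characters(path) -> ['翻译函数', '批量翻译']
```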
297
+ def extract_chinese_characters_from_directory(directory_path):
298
+ chinese_characters = []
299
+ for root, dirs, files in os.walk(directory_path):
300
+ if any([b in root for b in blacklist]):
301
+ continue
302
+ for file in files:
303
+ if file.endswith('.py'):
304
+ file_path = os.path.join(root, file)
305
+ chinese_characters.extend(extract_chinese_characters(file_path))
306
+ return chinese_characters
307
+
308
+ directory_path = './'
309
+ chinese_core_names = extract_chinese_characters_from_directory(directory_path)
310
+ chinese_core_keys = [name for name in chinese_core_names]
311
+ chinese_core_keys_norepeat = []
312
+ for d in chinese_core_keys:
313
+ if d not in chinese_core_keys_norepeat: chinese_core_keys_norepeat.append(d)
314
+ need_translate = []
315
+ cached_translation = read_map_from_json(language=LANG)
316
+ cached_translation_keys = list(cached_translation.keys())
317
+ for d in chinese_core_keys_norepeat:
318
+ if d not in cached_translation_keys:
319
+ need_translate.append(d)
320
+
321
+ need_translate_mapping = trans(need_translate, language=LANG, special=True)
322
+ map_to_json(need_translate_mapping, language=LANG)
323
+ cached_translation = read_map_from_json(language=LANG)
324
+ cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))
325
+
326
+ chinese_core_keys_norepeat_mapping = {}
327
+ for k in chinese_core_keys_norepeat:
328
+ chinese_core_keys_norepeat_mapping.update({k:cached_translation[k]})
329
+ chinese_core_keys_norepeat_mapping = dict(sorted(chinese_core_keys_norepeat_mapping.items(), key=lambda x: -len(x[0])))
330
+
331
+ # ===============================================
332
+ # copy
333
+ # ===============================================
334
+ def copy_source_code():
335
+
336
+ from toolbox import get_conf
337
+ import shutil
338
+ import os
339
+ try: shutil.rmtree(f'./multi-language/{LANG}/')
340
+ except: pass
341
+ os.makedirs(f'./multi-language', exist_ok=True)
342
+ backup_dir = f'./multi-language/{LANG}/'
343
+ shutil.copytree('./', backup_dir, ignore=lambda x, y: blacklist)
344
+ copy_source_code()
345
+
346
+ # ===============================================
347
+ # primary key replace
348
+ # ===============================================
349
+ directory_path = f'./multi-language/{LANG}/'
350
+ for root, dirs, files in os.walk(directory_path):
351
+ for file in files:
352
+ if file.endswith('.py'):
353
+ file_path = os.path.join(root, file)
354
+ syntax = []
355
+ # read again
356
+ with open(file_path, 'r', encoding='utf-8') as f:
357
+ content = f.read()
358
+
359
+ for k, v in chinese_core_keys_norepeat_mapping.items():
360
+ content = content.replace(k, v)
361
+
362
+ with open(file_path, 'w', encoding='utf-8') as f:
363
+ f.write(content)
364
+
365
+
366
+ def step_2_core_key_translate():
367
+
368
+ # =================================================================================================
369
+ # step2
370
+ # =================================================================================================
371
+
372
+ def load_string(strings, string_input):
373
+ string_ = string_input.strip().strip(',').strip().strip('.').strip()
374
+ if string_.startswith('[Local Message]'):
375
+ string_ = string_.replace('[Local Message]', '')
376
+ string_ = string_.strip().strip(',').strip().strip('.').strip()
377
+ splitted_string = [string_]
378
+ # --------------------------------------
379
+ splitted_string = advanced_split(splitted_string, spliter=",", include_spliter=False)
380
+ splitted_string = advanced_split(splitted_string, spliter="。", include_spliter=False)
381
+ splitted_string = advanced_split(splitted_string, spliter=")", include_spliter=False)
382
+ splitted_string = advanced_split(splitted_string, spliter="(", include_spliter=False)
383
+ splitted_string = advanced_split(splitted_string, spliter="(", include_spliter=False)
384
+ splitted_string = advanced_split(splitted_string, spliter=")", include_spliter=False)
385
+ splitted_string = advanced_split(splitted_string, spliter="<", include_spliter=False)
386
+ splitted_string = advanced_split(splitted_string, spliter=">", include_spliter=False)
387
+ splitted_string = advanced_split(splitted_string, spliter="[", include_spliter=False)
388
+ splitted_string = advanced_split(splitted_string, spliter="]", include_spliter=False)
389
+ splitted_string = advanced_split(splitted_string, spliter="【", include_spliter=False)
390
+ splitted_string = advanced_split(splitted_string, spliter="】", include_spliter=False)
391
+ splitted_string = advanced_split(splitted_string, spliter="?", include_spliter=False)
392
+ splitted_string = advanced_split(splitted_string, spliter=":", include_spliter=False)
393
+ splitted_string = advanced_split(splitted_string, spliter=":", include_spliter=False)
394
+ splitted_string = advanced_split(splitted_string, spliter=",", include_spliter=False)
395
+ splitted_string = advanced_split(splitted_string, spliter="#", include_spliter=False)
396
+ splitted_string = advanced_split(splitted_string, spliter="\n", include_spliter=False)
397
+ splitted_string = advanced_split(splitted_string, spliter=";", include_spliter=False)
398
+ splitted_string = advanced_split(splitted_string, spliter="`", include_spliter=False)
399
+ splitted_string = advanced_split(splitted_string, spliter=" ", include_spliter=False)
400
+ splitted_string = advanced_split(splitted_string, spliter="- ", include_spliter=False)
401
+ splitted_string = advanced_split(splitted_string, spliter="---", include_spliter=False)
402
+
403
+ # --------------------------------------
404
+ for j, s in enumerate(splitted_string): # .com
405
+ if '.com' in s: continue
406
+ if '\'' in s: continue
407
+ if '\"' in s: continue
408
+ strings.append([s,0])
409
+
410
+
411
+ def get_strings(node):
412
+ strings = []
413
+ # recursively traverse the AST
414
+ for child in ast.iter_child_nodes(node):
415
+ node = child
416
+ if isinstance(child, ast.Str):
417
+ if contains_chinese(child.s):
418
+ load_string(strings=strings, string_input=child.s)
419
+ elif isinstance(child, ast.AST):
420
+ strings.extend(get_strings(child))
421
+ return strings
422
+
423
+ string_literals = []
424
+ directory_path = f'./multi-language/{LANG}/'
425
+ for root, dirs, files in os.walk(directory_path):
426
+ for file in files:
427
+ if file.endswith('.py'):
428
+ file_path = os.path.join(root, file)
429
+ syntax = []
430
+ with open(file_path, 'r', encoding='utf-8') as f:
431
+ content = f.read()
432
+ # comments
433
+ comments_arr = []
434
+ for code_sp in content.splitlines():
435
+ comments = re.findall(r'#.*$', code_sp)
436
+ for comment in comments:
437
+ load_string(strings=comments_arr, string_input=comment)
438
+ string_literals.extend(comments_arr)
439
+
440
+ # strings
441
+ import ast
442
+ tree = ast.parse(content)
443
+ res = get_strings(tree, )
444
+ string_literals.extend(res)
445
+
446
+ [print(s) for s in string_literals]
447
+ chinese_literal_names = []
448
+ chinese_literal_names_norepeat = []
449
+ for string, offset in string_literals:
450
+ chinese_literal_names.append(string)
451
+ chinese_literal_names_norepeat = []
452
+ for d in chinese_literal_names:
453
+ if d not in chinese_literal_names_norepeat: chinese_literal_names_norepeat.append(d)
454
+ need_translate = []
455
+ cached_translation = read_map_from_json(language=LANG)
456
+ cached_translation_keys = list(cached_translation.keys())
457
+ for d in chinese_literal_names_norepeat:
458
+ if d not in cached_translation_keys:
459
+ need_translate.append(d)
460
+
461
+
462
+ up = trans_json(need_translate, language=LANG, special=False)
463
+ map_to_json(up, language=LANG)
464
+ cached_translation = read_map_from_json(language=LANG)
465
+ cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))
466
+
467
+ # ===============================================
468
+ # literal key replace
469
+ # ===============================================
470
+ directory_path = f'./multi-language/{LANG}/'
471
+ for root, dirs, files in os.walk(directory_path):
472
+ for file in files:
473
+ if file.endswith('.py'):
474
+ file_path = os.path.join(root, file)
475
+ syntax = []
476
+ # read again
477
+ with open(file_path, 'r', encoding='utf-8') as f:
478
+ content = f.read()
479
+
480
+ for k, v in cached_translation.items():
481
+ if v is None: continue
482
+ if '"' in v:
483
+ v = v.replace('"', "`")
484
+ if '\'' in v:
485
+ v = v.replace('\'', "`")
486
+ content = content.replace(k, v)
487
+
488
+ with open(file_path, 'w', encoding='utf-8') as f:
489
+ f.write(content)
490
+
491
+ if file.strip('.py') in cached_translation:
492
+ file_new = cached_translation[file.strip('.py')] + '.py'
493
+ file_path_new = os.path.join(root, file_new)
494
+ with open(file_path_new, 'w', encoding='utf-8') as f:
495
+ f.write(content)
496
+ os.remove(file_path)
497
+
498
+ step_1_core_key_translate()
499
+ step_2_core_key_translate()
request_llm/README.md CHANGED
@@ -13,6 +13,31 @@ LLM_MODEL = "chatglm"
13
  `python main.py`
14
  ```
15
 
16
 
17
  ---
18
  ## Text-Generation-UI (TGUI, under debugging, not yet usable)
 
13
  `python main.py`
14
  ```
15
 
16
+ ## Claude-Stack
17
+
18
+ - Refer to this tutorial to obtain them: https://zhuanlan.zhihu.com/p/627485689
19
+ - 1、SLACK_CLAUDE_BOT_ID
20
+ - 2、SLACK_CLAUDE_USER_TOKEN
21
+
22
+ - Add the tokens to config.py
23
+
24
+ ## Newbing
25
+
26
+ - Use a cookie editor to obtain the cookie (JSON)
27
+ - Add the cookie (JSON) to config.py (NEWBING_COOKIES)
28
+
29
+ ## Moss
30
+ - Use docker-compose
31
+
32
+ ## RWKV
33
+ - Use docker-compose
34
+
35
+ ## LLAMA
36
+ - Use docker-compose
37
+
38
+ ## 盘古 (PanGu)
39
+ - Use docker-compose
40
+
41
 
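The entries above map onto `config.py` roughly as follows (a sketch with placeholder values; see the linked tutorials for how to obtain real ones):

```python
# config.py -- placeholder values
SLACK_CLAUDE_BOT_ID = "U0XXXXXXXXX"
SLACK_CLAUDE_USER_TOKEN = "xoxp-..."
NEWBING_COOKIES = '[{"name": "...", "value": "..."}]'  # JSON exported by a cookie editor
```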
42
  ---
43
  ## Text-Generation-UI (TGUI, under debugging, not yet usable)
request_llm/bridge_all.py CHANGED
@@ -11,7 +11,7 @@
11
  import tiktoken
12
  from functools import lru_cache
13
  from concurrent.futures import ThreadPoolExecutor
14
- from toolbox import get_conf
15
 
16
  from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
17
  from .bridge_chatgpt import predict as chatgpt_ui
@@ -19,6 +19,9 @@ from .bridge_chatgpt import predict as chatgpt_ui
19
  from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
20
  from .bridge_chatglm import predict as chatglm_ui
21
 
22
  # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
23
  # from .bridge_tgui import predict as tgui_ui
24
 
@@ -48,6 +51,7 @@ class LazyloadTiktoken(object):
48
  API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
49
  openai_endpoint = "https://api.openai.com/v1/chat/completions"
50
  api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
 
51
  # 兼容旧版的配置
52
  try:
53
  API_URL, = get_conf("API_URL")
@@ -59,6 +63,7 @@ except:
59
  # 新版配置
60
  if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
61
  if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
 
62
 
63
 
64
  # 获取tokenizer
@@ -116,10 +121,88 @@ model_info = {
116
  "tokenizer": tokenizer_gpt35,
117
  "token_cnt": get_token_num_gpt35,
118
  },
119
 
120
  }
121
 
122
 
123
  def LLM_CATCH_EXCEPTION(f):
124
  """
125
  装饰器函数,将错误显示出来
@@ -128,10 +211,7 @@ def LLM_CATCH_EXCEPTION(f):
128
  try:
129
  return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
130
  except Exception as e:
131
- from toolbox import get_conf
132
- import traceback
133
- proxies, = get_conf('proxies')
134
- tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
135
  observe_window[0] = tb_str
136
  return tb_str
137
  return decorated
@@ -182,7 +262,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
182
 
183
  def mutex_manager(window_mutex, observe_window):
184
  while True:
185
- time.sleep(0.5)
186
  if not window_mutex[-1]: break
187
  # 看门狗(watchdog)
188
  for i in range(n_model):
 
11
  import tiktoken
12
  from functools import lru_cache
13
  from concurrent.futures import ThreadPoolExecutor
14
+ from toolbox import get_conf, trimmed_format_exc
15
 
16
  from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
17
  from .bridge_chatgpt import predict as chatgpt_ui
 
19
  from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
20
  from .bridge_chatglm import predict as chatglm_ui
21
 
22
+ from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
23
+ from .bridge_newbing import predict as newbing_ui
24
+
25
  # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
26
  # from .bridge_tgui import predict as tgui_ui
27
 
 
51
  API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
52
  openai_endpoint = "https://api.openai.com/v1/chat/completions"
53
  api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
54
+ newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
55
  # 兼容旧版的配置
56
  try:
57
  API_URL, = get_conf("API_URL")
 
63
  # 新版配置
64
  if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
65
  if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
66
+ if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
67
 
68
 
69
  # 获取tokenizer
 
121
  "tokenizer": tokenizer_gpt35,
122
  "token_cnt": get_token_num_gpt35,
123
  },
124
+ # newbing
125
+ "newbing": {
126
+ "fn_with_ui": newbing_ui,
127
+ "fn_without_ui": newbing_noui,
128
+ "endpoint": newbing_endpoint,
129
+ "max_token": 4096,
130
+ "tokenizer": tokenizer_gpt35,
131
+ "token_cnt": get_token_num_gpt35,
132
+ },
133
 
134
  }
135
 
136
 
137
+ AVAIL_LLM_MODELS, = get_conf("AVAIL_LLM_MODELS")
138
+ if "jittorllms_rwkv" in AVAIL_LLM_MODELS:
139
+ from .bridge_jittorllms_rwkv import predict_no_ui_long_connection as rwkv_noui
140
+ from .bridge_jittorllms_rwkv import predict as rwkv_ui
141
+ model_info.update({
142
+ "jittorllms_rwkv": {
143
+ "fn_with_ui": rwkv_ui,
144
+ "fn_without_ui": rwkv_noui,
145
+ "endpoint": None,
146
+ "max_token": 1024,
147
+ "tokenizer": tokenizer_gpt35,
148
+ "token_cnt": get_token_num_gpt35,
149
+ },
150
+ })
151
+ if "jittorllms_llama" in AVAIL_LLM_MODELS:
152
+ from .bridge_jittorllms_llama import predict_no_ui_long_connection as llama_noui
153
+ from .bridge_jittorllms_llama import predict as llama_ui
154
+ model_info.update({
155
+ "jittorllms_llama": {
156
+ "fn_with_ui": llama_ui,
157
+ "fn_without_ui": llama_noui,
158
+ "endpoint": None,
159
+ "max_token": 1024,
160
+ "tokenizer": tokenizer_gpt35,
161
+ "token_cnt": get_token_num_gpt35,
162
+ },
163
+ })
164
+ if "jittorllms_pangualpha" in AVAIL_LLM_MODELS:
165
+ from .bridge_jittorllms_pangualpha import predict_no_ui_long_connection as pangualpha_noui
166
+ from .bridge_jittorllms_pangualpha import predict as pangualpha_ui
167
+ model_info.update({
168
+ "jittorllms_pangualpha": {
169
+ "fn_with_ui": pangualpha_ui,
170
+ "fn_without_ui": pangualpha_noui,
171
+ "endpoint": None,
172
+ "max_token": 1024,
173
+ "tokenizer": tokenizer_gpt35,
174
+ "token_cnt": get_token_num_gpt35,
175
+ },
176
+ })
177
+ if "moss" in AVAIL_LLM_MODELS:
178
+ from .bridge_moss import predict_no_ui_long_connection as moss_noui
179
+ from .bridge_moss import predict as moss_ui
180
+ model_info.update({
181
+ "moss": {
182
+ "fn_with_ui": moss_ui,
183
+ "fn_without_ui": moss_noui,
184
+ "endpoint": None,
185
+ "max_token": 1024,
186
+ "tokenizer": tokenizer_gpt35,
187
+ "token_cnt": get_token_num_gpt35,
188
+ },
189
+ })
190
+ if "stack-claude" in AVAIL_LLM_MODELS:
191
+ from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui
192
+ from .bridge_stackclaude import predict as claude_ui
193
+ # claude
194
+ model_info.update({
195
+ "stack-claude": {
196
+ "fn_with_ui": claude_ui,
197
+ "fn_without_ui": claude_noui,
198
+ "endpoint": None,
199
+ "max_token": 8192,
200
+ "tokenizer": tokenizer_gpt35,
201
+ "token_cnt": get_token_num_gpt35,
202
+ }
203
+ })
204
+
205
+
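Every optional backend above follows the same registration recipe, so adding another one is mechanical. A hedged sketch ("my_model" and its bridge module are hypothetical):

```python
if "my_model" in AVAIL_LLM_MODELS:   # gated by config, like the blocks above
    from .bridge_my_model import predict_no_ui_long_connection as my_noui
    from .bridge_my_model import predict as my_ui
    model_info.update({
        "my_model": {
            "fn_with_ui": my_ui,
            "fn_without_ui": my_noui,
            "endpoint": None,        # local model: no HTTP endpoint
            "max_token": 1024,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        },
    })
```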
206
  def LLM_CATCH_EXCEPTION(f):
207
  """
208
  装饰器函数,将错误显示出来
 
211
  try:
212
  return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
213
  except Exception as e:
214
+ tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
 
 
 
215
  observe_window[0] = tb_str
216
  return tb_str
217
  return decorated
 
262
 
263
  def mutex_manager(window_mutex, observe_window):
264
  while True:
265
+ time.sleep(0.25)
266
  if not window_mutex[-1]: break
267
  # 看门狗(watchdog)
268
  for i in range(n_model):
request_llm/bridge_chatglm.py CHANGED
@@ -1,6 +1,7 @@
1
 
2
  from transformers import AutoModel, AutoTokenizer
3
  import time
 
4
  import importlib
5
  from toolbox import update_ui, get_conf
6
  from multiprocessing import Process, Pipe
@@ -18,6 +19,7 @@ class GetGLMHandle(Process):
18
  self.success = True
19
  self.check_dependency()
20
  self.start()
 
21
 
22
  def check_dependency(self):
23
  try:
@@ -32,6 +34,7 @@ class GetGLMHandle(Process):
32
  return self.chatglm_model is not None
33
 
34
  def run(self):
 
35
  # 第一次运行,加载参数
36
  retry = 0
37
  while True:
@@ -53,17 +56,26 @@ class GetGLMHandle(Process):
53
  self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
54
  raise RuntimeError("不能正常加载ChatGLM的参数!")
55
 
56
- # 进入任务等待状态
57
  while True:
 
58
  kwargs = self.child.recv()
 
59
  try:
60
  for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
61
  self.child.send(response)
 
 
 
 
62
  except:
63
- self.child.send('[Local Message] Call ChatGLM fail.')
 
 
64
  self.child.send('[Finish]')
65
 
66
  def stream_chat(self, **kwargs):
 
 
67
  self.parent.send(kwargs)
68
  while True:
69
  res = self.parent.recv()
@@ -71,12 +83,12 @@ class GetGLMHandle(Process):
71
  yield res
72
  else:
73
  break
74
- return
75
 
76
  global glm_handle
77
  glm_handle = None
78
  #################################################################################
79
- def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
80
  """
81
  多线程方法
82
  函数的说明请见 request_llm/bridge_all.py
@@ -84,7 +96,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
84
  global glm_handle
85
  if glm_handle is None:
86
  glm_handle = GetGLMHandle()
87
- observe_window[0] = load_message + "\n\n" + glm_handle.info
88
  if not glm_handle.success:
89
  error = glm_handle.info
90
  glm_handle = None
@@ -99,7 +111,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
99
  watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
100
  response = ""
101
  for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
102
- observe_window[0] = response
103
  if len(observe_window) >= 2:
104
  if (time.time()-observe_window[1]) > watch_dog_patience:
105
  raise RuntimeError("程序终止。")
@@ -130,14 +142,20 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
130
  if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
131
  inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
132
 
 
133
  history_feedin = []
134
  history_feedin.append(["What can I do?", system_prompt] )
135
  for i in range(len(history)//2):
136
  history_feedin.append([history[2*i], history[2*i+1]] )
137
 
 
 
138
  for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
139
  chatbot[-1] = (inputs, response)
140
  yield from update_ui(chatbot=chatbot, history=history)
141
 
 
 
 
142
  history.extend([inputs, response])
143
- yield from update_ui(chatbot=chatbot, history=history)
 
1
 
2
  from transformers import AutoModel, AutoTokenizer
3
  import time
4
+ import threading
5
  import importlib
6
  from toolbox import update_ui, get_conf
7
  from multiprocessing import Process, Pipe
 
19
  self.success = True
20
  self.check_dependency()
21
  self.start()
22
+ self.threadLock = threading.Lock()
23
 
24
  def check_dependency(self):
25
  try:
 
34
  return self.chatglm_model is not None
35
 
36
  def run(self):
37
+ # 子进程执行
38
  # 第一次运行,加载参数
39
  retry = 0
40
  while True:
 
56
  self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
57
  raise RuntimeError("不能正常加载ChatGLM的参数!")
58
 
 
59
  while True:
60
+ # 进入任务等待状态
61
  kwargs = self.child.recv()
62
+ # 收到消息,开始请求
63
  try:
64
  for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
65
  self.child.send(response)
66
+ # # 中途接收可能的终止指令(如果有的话)
67
+ # if self.child.poll():
68
+ # command = self.child.recv()
69
+ # if command == '[Terminate]': break
70
  except:
71
+ from toolbox import trimmed_format_exc
72
+ self.child.send('[Local Message] Call ChatGLM fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
73
+ # 请求处理结束,开始下一个循环
74
  self.child.send('[Finish]')
75
 
76
  def stream_chat(self, **kwargs):
77
+ # 主进程执行
78
+ self.threadLock.acquire()
79
  self.parent.send(kwargs)
80
  while True:
81
  res = self.parent.recv()
 
83
  yield res
84
  else:
85
  break
86
+ self.threadLock.release()
87
 
88
  global glm_handle
89
  glm_handle = None
90
  #################################################################################
91
+ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
92
  """
93
  多线程方法
94
  函数的说明请见 request_llm/bridge_all.py
 
96
  global glm_handle
97
  if glm_handle is None:
98
  glm_handle = GetGLMHandle()
99
+ if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info
100
  if not glm_handle.success:
101
  error = glm_handle.info
102
  glm_handle = None
 
111
  watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
112
  response = ""
113
  for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
114
+ if len(observe_window) >= 1: observe_window[0] = response
115
  if len(observe_window) >= 2:
116
  if (time.time()-observe_window[1]) > watch_dog_patience:
117
  raise RuntimeError("程序终止。")
 
142
  if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
143
  inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
144
 
145
+ # 处理历史信息
146
  history_feedin = []
147
  history_feedin.append(["What can I do?", system_prompt] )
148
  for i in range(len(history)//2):
149
  history_feedin.append([history[2*i], history[2*i+1]] )
150
 
151
+ # 开始接收chatglm的回复
152
+ response = "[Local Message]: 等待ChatGLM响应中 ..."
153
  for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
154
  chatbot[-1] = (inputs, response)
155
  yield from update_ui(chatbot=chatbot, history=history)
156
 
157
+ # 总结输出
158
+ if response == "[Local Message]: 等待ChatGLM响应中 ...":
159
+ response = "[Local Message]: ChatGLM响应异常 ..."
160
  history.extend([inputs, response])
161
+ yield from update_ui(chatbot=chatbot, history=history)
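The parent/child handshake used by `GetGLMHandle`, in miniature: the worker streams partial replies over a `Pipe` and closes each request with the `'[Finish]'` sentinel, while the new `threading.Lock` serializes concurrent callers on the parent side. A standalone sketch (not project code):

```python
from multiprocessing import Pipe, Process

def worker(conn):
    while True:
        kwargs = conn.recv()                    # block until a request arrives
        for partial in ("Hel", "Hello", "Hello!"):
            conn.send(partial)                  # stream growing responses
        conn.send('[Finish]')                   # sentinel: request complete

if __name__ == '__main__':
    parent, child = Pipe()
    Process(target=worker, args=(child,), daemon=True).start()
    parent.send({'query': 'hi'})
    while (res := parent.recv()) != '[Finish]':
        print(res)                              # Hel / Hello / Hello!
```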
request_llm/bridge_chatgpt.py CHANGED
@@ -21,7 +21,7 @@ import importlib
21
 
22
  # config_private.py放自己的秘密如API和代理网址
23
  # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
24
- from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys
25
  proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
26
  get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
27
 
@@ -145,7 +145,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
145
  yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
146
  return
147
 
148
- history.append(inputs); history.append(" ")
149
 
150
  retry = 0
151
  while True:
@@ -168,7 +168,15 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
168
  if stream:
169
  stream_response = response.iter_lines()
170
  while True:
171
- chunk = next(stream_response)
172
  # print(chunk.decode()[6:])
173
  if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()):
174
  # 数据流的第一帧不携带content
@@ -198,22 +206,25 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
198
  chunk_decoded = chunk.decode()
199
  error_msg = chunk_decoded
200
  if "reduce the length" in error_msg:
201
- chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长,或历史数据过长. 历史缓存数据现已释放,您可以请再次尝试.")
202
- history = [] # 清除历史
203
  elif "does not exist" in error_msg:
204
- chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在,或者您没有获得体验资格.")
205
  elif "Incorrect API key" in error_msg:
206
- chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由,拒绝服务.")
207
  elif "exceeded your current quota" in error_msg:
208
- chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.")
209
  elif "bad forward key" in error_msg:
210
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
211
  elif "Not enough point" in error_msg:
212
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
213
  else:
214
  from toolbox import regular_txt_to_markdown
215
- tb_str = '```\n' + traceback.format_exc() + '```'
216
- chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded[4:])}")
217
  yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
218
  return
219
 
 
21
 
22
  # config_private.py放自己的秘密如API和代理网址
23
  # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
24
+ from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
25
  proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
26
  get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
27
 
 
145
  yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
146
  return
147
 
148
+ history.append(inputs); history.append("")
149
 
150
  retry = 0
151
  while True:
 
168
  if stream:
169
  stream_response = response.iter_lines()
170
  while True:
171
+ try:
172
+ chunk = next(stream_response)
173
+ except StopIteration:
174
+ # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里
175
+ from toolbox import regular_txt_to_markdown; tb_str = '```\n' + trimmed_format_exc() + '```'
176
+ chatbot[-1] = (chatbot[-1][0], f"[Local Message] 远程返回错误: \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode())}")
177
+ yield from update_ui(chatbot=chatbot, history=history, msg="远程返回错误:" + chunk.decode()) # 刷新界面
178
+ return
179
+
180
  # print(chunk.decode()[6:])
181
  if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()):
182
  # 数据流的第一帧不携带content
 
206
  chunk_decoded = chunk.decode()
207
  error_msg = chunk_decoded
208
  if "reduce the length" in error_msg:
209
+ if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
210
+ history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
211
+ max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一
212
+ chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
213
+ # history = [] # 清除历史
214
  elif "does not exist" in error_msg:
215
+ chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
216
  elif "Incorrect API key" in error_msg:
217
+ chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务.")
218
  elif "exceeded your current quota" in error_msg:
219
+ chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务.")
220
  elif "bad forward key" in error_msg:
221
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
222
  elif "Not enough point" in error_msg:
223
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
224
  else:
225
  from toolbox import regular_txt_to_markdown
226
+ tb_str = '```\n' + trimmed_format_exc() + '```'
227
+ chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
228
  yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
229
  return
230
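The key behavioural change in the `reduce the length` branch above: instead of wiping `history` outright, the overflowing turn is blanked and `clip_history` (imported from `toolbox`) trims old turns by token count until the conversation fits the model's `max_token` budget. In spirit (an illustrative sketch, not the real toolbox implementation):

```python
# Illustrative only -- the real clip_history lives in toolbox.py and uses
# the model's tokenizer from model_info, as shown above.
def clip_history_sketch(inputs, history, count_tokens, max_token_limit):
    while history and count_tokens(inputs + "".join(history)) > max_token_limit:
        history = history[2:]        # drop the oldest (user, assistant) pair
    return history
```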
 
request_llm/bridge_jittorllms_llama.py ADDED
@@ -0,0 +1,178 @@
1
+
2
+ from transformers import AutoModel, AutoTokenizer
3
+ import time
4
+ import threading
5
+ import importlib
6
+ from toolbox import update_ui, get_conf
7
+ from multiprocessing import Process, Pipe
8
+
9
+ load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
10
+
11
+ #################################################################################
12
+ class GetGLMHandle(Process):
13
+ def __init__(self):
14
+ super().__init__(daemon=True)
15
+ self.parent, self.child = Pipe()
16
+ self.jittorllms_model = None
17
+ self.info = ""
18
+ self.local_history = []
19
+ self.success = True
20
+ self.check_dependency()
21
+ self.start()
22
+ self.threadLock = threading.Lock()
23
+
24
+ def check_dependency(self):
25
+ try:
26
+ import pandas
27
+ self.info = "依赖检测通过"
28
+ self.success = True
29
+ except:
30
+ from toolbox import trimmed_format_exc
31
+ self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
32
+ r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\
33
+ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc()
34
+ self.success = False
35
+
36
+ def ready(self):
37
+ return self.jittorllms_model is not None
38
+
39
+ def run(self):
40
+ # 子进程执行
41
+ # 第一次运行,加载参数
42
+ def validate_path():
43
+ import os, sys
44
+ dir_name = os.path.dirname(__file__)
45
+ env = os.environ.get("PATH", "")
46
+ os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
47
+ root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
48
+ os.chdir(root_dir_assume + '/request_llm/jittorllms')
49
+ sys.path.append(root_dir_assume + '/request_llm/jittorllms')
50
+ validate_path() # validate path so you can run from base directory
51
+
52
+ def load_model():
53
+ import types
54
+ try:
55
+ if self.jittorllms_model is None:
56
+ device, = get_conf('LOCAL_MODEL_DEVICE')
57
+ from .jittorllms.models import get_model
58
+ # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
59
+ args_dict = {'model': 'llama'}
60
+ print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
61
+ self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
62
+ print('done get model')
63
+ except:
64
+ self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
65
+ raise RuntimeError("不能正常加载jittorllms的参数!")
66
+ print('load_model')
67
+ load_model()
68
+
69
+ # 进入任务等待状态
70
+ print('进入任务等待状态')
71
+ while True:
72
+ # 进入任务等待状态
73
+ kwargs = self.child.recv()
74
+ query = kwargs['query']
75
+ history = kwargs['history']
76
+ # 是否重置
77
+ if len(self.local_history) > 0 and len(history)==0:
78
+ print('触发重置')
79
+ self.jittorllms_model.reset()
80
+ self.local_history.append(query)
81
+
82
+ print('收到消息,开始请求')
83
+ try:
84
+ for response in self.jittorllms_model.stream_chat(query, history):
85
+ print(response)
86
+ self.child.send(response)
87
+ except:
88
+ from toolbox import trimmed_format_exc
89
+ print(trimmed_format_exc())
90
+ self.child.send('[Local Message] Call jittorllms fail.')
91
+ # 请求处理结束,开始下一个循环
92
+ self.child.send('[Finish]')
93
+
94
+ def stream_chat(self, **kwargs):
95
+ # 主进程执行
96
+ self.threadLock.acquire()
97
+ self.parent.send(kwargs)
98
+ while True:
99
+ res = self.parent.recv()
100
+ if res != '[Finish]':
101
+ yield res
102
+ else:
103
+ break
104
+ self.threadLock.release()
105
+
106
+ global llama_glm_handle
107
+ llama_glm_handle = None
108
+ #################################################################################
109
+ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
110
+ """
111
+ 多线程方法
112
+ 函数的说明请见 request_llm/bridge_all.py
113
+ """
114
+ global llama_glm_handle
115
+ if llama_glm_handle is None:
116
+ llama_glm_handle = GetGLMHandle()
117
+ if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + llama_glm_handle.info
118
+ if not llama_glm_handle.success:
119
+ error = llama_glm_handle.info
120
+ llama_glm_handle = None
121
+ raise RuntimeError(error)
122
+
123
+ # jittorllms 没有 sys_prompt 接口,因此把prompt加入 history
124
+ history_feedin = []
125
+ for i in range(len(history)//2):
126
+ history_feedin.append([history[2*i], history[2*i+1]] )
127
+
128
+ watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
129
+ response = ""
130
+ for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
131
+ print(response)
132
+ if len(observe_window) >= 1: observe_window[0] = response
133
+ if len(observe_window) >= 2:
134
+ if (time.time()-observe_window[1]) > watch_dog_patience:
135
+ raise RuntimeError("程序终止。")
136
+ return response
137
+
138
+
139
+
140
+ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
141
+ """
142
+ 单线程方法
143
+ 函数的说明请见 request_llm/bridge_all.py
144
+ """
145
+ chatbot.append((inputs, ""))
146
+
147
+ global llama_glm_handle
148
+ if llama_glm_handle is None:
149
+ llama_glm_handle = GetGLMHandle()
150
+ chatbot[-1] = (inputs, load_message + "\n\n" + llama_glm_handle.info)
151
+ yield from update_ui(chatbot=chatbot, history=[])
152
+ if not llama_glm_handle.success:
153
+ llama_glm_handle = None
154
+ return
155
+
156
+ if additional_fn is not None:
157
+ import core_functional
158
+ importlib.reload(core_functional) # 热更新prompt
159
+ core_functional = core_functional.get_core_functions()
160
+ if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
161
+ inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
162
+
163
+ # 处理历史信息
164
+ history_feedin = []
165
+ for i in range(len(history)//2):
166
+ history_feedin.append([history[2*i], history[2*i+1]] )
167
+
168
+ # 开始接收jittorllms的回复
169
+ response = "[Local Message]: 等待jittorllms响应中 ..."
170
+ for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
171
+ chatbot[-1] = (inputs, response)
172
+ yield from update_ui(chatbot=chatbot, history=history)
173
+
174
+ # 总结输出
175
+ if response == "[Local Message]: 等待jittorllms响应中 ...":
176
+ response = "[Local Message]: jittorllms响应异常 ..."
177
+ history.extend([inputs, response])
178
+ yield from update_ui(chatbot=chatbot, history=history)
request_llm/bridge_jittorllms_pangualpha.py ADDED
@@ -0,0 +1,178 @@
1
+
2
+ from transformers import AutoModel, AutoTokenizer
3
+ import time
4
+ import threading
5
+ import importlib
6
+ from toolbox import update_ui, get_conf
7
+ from multiprocessing import Process, Pipe
8
+
9
+ load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
10
+
11
+ #################################################################################
12
+ class GetGLMHandle(Process):
13
+ def __init__(self):
14
+ super().__init__(daemon=True)
15
+ self.parent, self.child = Pipe()
16
+ self.jittorllms_model = None
17
+ self.info = ""
18
+ self.local_history = []
19
+ self.success = True
20
+ self.check_dependency()
21
+ self.start()
22
+ self.threadLock = threading.Lock()
23
+
24
+ def check_dependency(self):
25
+ try:
26
+ import pandas
27
+ self.info = "依赖检测通过"
28
+ self.success = True
29
+ except:
30
+ from toolbox import trimmed_format_exc
31
+ self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
32
+ r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\
33
+ r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc()
34
+ self.success = False
35
+
36
+ def ready(self):
37
+ return self.jittorllms_model is not None
38
+
39
+ def run(self):
40
+ # 子进程执行
41
+ # 第一次运行,加载参数
42
+ def validate_path():
43
+ import os, sys
44
+ dir_name = os.path.dirname(__file__)
45
+ env = os.environ.get("PATH", "")
46
+ os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
47
+ root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
48
+ os.chdir(root_dir_assume + '/request_llm/jittorllms')
49
+ sys.path.append(root_dir_assume + '/request_llm/jittorllms')
50
+ validate_path() # validate path so you can run from base directory
51
+
52
+ def load_model():
53
+ import types
54
+ try:
55
+ if self.jittorllms_model is None:
56
+ device, = get_conf('LOCAL_MODEL_DEVICE')
57
+ from .jittorllms.models import get_model
58
+ # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
59
+ args_dict = {'model': 'pangualpha'}
60
+ print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
61
+ self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
62
+ print('done get model')
63
+ except:
64
+ self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
65
+ raise RuntimeError("不能正常加载jittorllms的参数!")
66
+ print('load_model')
67
+ load_model()
68
+
69
+ # 进入任务等待状态
70
+ print('进入任务等待状态')
71
+ while True:
72
+ # 进入任务等待状态
73
+            kwargs = self.child.recv()
+            query = kwargs['query']
+            history = kwargs['history']
+            # Reset the model when the caller has cleared its history
+            if len(self.local_history) > 0 and len(history) == 0:
+                print('Reset triggered')
+                self.jittorllms_model.reset()
+            self.local_history.append(query)
+
+            print('Message received, starting request')
+            try:
+                for response in self.jittorllms_model.stream_chat(query, history):
+                    print(response)
+                    self.child.send(response)
+            except:
+                from toolbox import trimmed_format_exc
+                print(trimmed_format_exc())
+                self.child.send('[Local Message] Call jittorllms fail.')
+            # Request handled; signal completion and start the next loop
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        # Executed in the main process
+        self.threadLock.acquire()
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        self.threadLock.release()
+
+global pangu_glm_handle
+pangu_glm_handle = None
+#################################################################################
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    """
+    Multithreaded method.
+    See request_llm/bridge_all.py for the function's documentation.
+    """
+    global pangu_glm_handle
+    if pangu_glm_handle is None:
+        pangu_glm_handle = GetGLMHandle()
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + pangu_glm_handle.info
+        if not pangu_glm_handle.success:
+            error = pangu_glm_handle.info
+            pangu_glm_handle = None
+            raise RuntimeError(error)
+
+    # jittorllms has no sys_prompt interface, so the prompt is folded into the history
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]])
+
+    watch_dog_patience = 5  # watchdog patience: 5 seconds is enough
+    response = ""
+    for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        print(response)
+        if len(observe_window) >= 1: observe_window[0] = response
+        if len(observe_window) >= 2:
+            if (time.time()-observe_window[1]) > watch_dog_patience:
+                raise RuntimeError("Program terminated.")
+    return response
+
+
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+    """
+    Single-threaded method.
+    See request_llm/bridge_all.py for the function's documentation.
+    """
+    chatbot.append((inputs, ""))
+
+    global pangu_glm_handle
+    if pangu_glm_handle is None:
+        pangu_glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, load_message + "\n\n" + pangu_glm_handle.info)
+        yield from update_ui(chatbot=chatbot, history=[])
+        if not pangu_glm_handle.success:
+            pangu_glm_handle = None
+            return
+
+    if additional_fn is not None:
+        import core_functional
+        importlib.reload(core_functional)    # hot-reload the prompt definitions
+        core_functional = core_functional.get_core_functions()
+        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function, if any
+        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
+
+    # Rebuild the history as [user, assistant] pairs
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]])
+
+    # Start receiving the reply from jittorllms
+    response = "[Local Message]: Waiting for jittorllms response ..."
+    for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)
+
+    # Wrap up the output
+    if response == "[Local Message]: Waiting for jittorllms response ...":
+        response = "[Local Message]: jittorllms response error ..."
+    history.extend([inputs, response])
+    yield from update_ui(chatbot=chatbot, history=history)
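
Both jittorllms bridges in this commit share one inter-process pattern: the model runs in a daemonized subprocess, the main process sends a request dict through a multiprocessing Pipe, streamed replies come back one message at a time, an in-band '[Finish]' sentinel marks the end of a request, and a threading.Lock serializes concurrent callers. Below is a minimal, self-contained sketch of that protocol; EchoWorker is a hypothetical stand-in for the model subprocess and is not part of this repository.

# A minimal sketch of the Pipe + '[Finish]' sentinel protocol used above.
# EchoWorker is a hypothetical stand-in for the jittorllms subprocess.
import threading
from multiprocessing import Process, Pipe

class EchoWorker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.threadLock = threading.Lock()  # serialize concurrent callers
        self.start()

    def run(self):
        # Child process: serve one request per loop iteration, forever.
        while True:
            kwargs = self.child.recv()
            for token in kwargs['query'].split():  # pretend to stream tokens
                self.child.send(token)
            self.child.send('[Finish]')  # in-band sentinel: request complete

    def stream_chat(self, **kwargs):
        # Main process: forward the request, yield replies until the sentinel.
        with self.threadLock:
            self.parent.send(kwargs)
            while True:
                res = self.parent.recv()
                if res == '[Finish]':
                    break
                yield res

if __name__ == '__main__':
    worker = EchoWorker()
    print(list(worker.stream_chat(query='hello from the main process')))

Note that the sentinel travels in-band on the same pipe as the replies, so a model message exactly equal to '[Finish]' would silently terminate the stream; the bridges in this commit accept that trade-off for simplicity.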
request_llm/bridge_jittorllms_rwkv.py ADDED
@@ -0,0 +1,178 @@
+
+from transformers import AutoModel, AutoTokenizer
+import time
+import threading
+import importlib
+from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe
+
+load_message = "jittorllms has not been loaded yet; loading takes a while. Note: avoid mixing several jittor models, otherwise GPU memory may overflow and cause stalls. Depending on the settings in `config.py`, jittorllms consumes a large amount of memory (CPU) or video memory (GPU) and may freeze low-end machines ..."
+
+#################################################################################
+class GetGLMHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.jittorllms_model = None
+        self.info = ""
+        self.local_history = []
+        self.success = True
+        self.check_dependency()
+        self.start()
+        self.threadLock = threading.Lock()
+
+    def check_dependency(self):
+        try:
+            import pandas
+            self.info = "Dependency check passed"
+            self.success = True
+        except:
+            from toolbox import trimmed_format_exc
+            self.info = r"Missing jittorllms dependencies. To use jittorllms, besides the basic pip dependencies, you also need to run `pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`" +\
+                        r" and `git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms` (run both commands from the project root)." +\
+                        r" Warning: installing the jittorllms dependencies will completely break your existing pytorch environment; a docker environment is strongly recommended! " + trimmed_format_exc()
+            self.success = False
+
+    def ready(self):
+        return self.jittorllms_model is not None
+
+    def run(self):
+        # Executed in the child process.
+        # First run: load the model parameters.
+        def validate_path():
+            import os, sys
+            dir_name = os.path.dirname(__file__)
+            env = os.environ.get("PATH", "")
+            os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
+            root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
+            os.chdir(root_dir_assume + '/request_llm/jittorllms')
+            sys.path.append(root_dir_assume + '/request_llm/jittorllms')
+        validate_path()  # validate path so you can run from base directory
+
+        def load_model():
+            import types
+            try:
+                if self.jittorllms_model is None:
+                    device, = get_conf('LOCAL_MODEL_DEVICE')
+                    from .jittorllms.models import get_model
+                    # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+                    args_dict = {'model': 'chatrwkv'}
+                    print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
+                    self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
+                    print('done get model')
+            except:
+                self.child.send('[Local Message] Call jittorllms fail: could not load jittorllms parameters.')
+                raise RuntimeError("Could not load jittorllms parameters!")
+        print('load_model')
+        load_model()
+
+        # Enter the task-waiting loop
+        print('Entering task-waiting state')
+        while True:
+            # Wait for the next request
+            kwargs = self.child.recv()
+            query = kwargs['query']
+            history = kwargs['history']
+            # Reset the model when the caller has cleared its history
+            if len(self.local_history) > 0 and len(history) == 0:
+                print('Reset triggered')
+                self.jittorllms_model.reset()
+            self.local_history.append(query)
+
+            print('Message received, starting request')
+            try:
+                for response in self.jittorllms_model.stream_chat(query, history):
+                    print(response)
+                    self.child.send(response)
+            except:
+                from toolbox import trimmed_format_exc
+                print(trimmed_format_exc())
+                self.child.send('[Local Message] Call jittorllms fail.')
+            # Request handled; signal completion and start the next loop
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        # Executed in the main process
+        self.threadLock.acquire()
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        self.threadLock.release()
+
+global rwkv_glm_handle
+rwkv_glm_handle = None
+#################################################################################
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    """
+    Multithreaded method.
+    See request_llm/bridge_all.py for the function's documentation.
+    """
+    global rwkv_glm_handle
+    if rwkv_glm_handle is None:
+        rwkv_glm_handle = GetGLMHandle()
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + rwkv_glm_handle.info
+        if not rwkv_glm_handle.success:
+            error = rwkv_glm_handle.info
+            rwkv_glm_handle = None
+            raise RuntimeError(error)
+
+    # jittorllms has no sys_prompt interface, so the prompt is folded into the history
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]])
+
+    watch_dog_patience = 5  # watchdog patience: 5 seconds is enough
+    response = ""
+    for response in rwkv_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        print(response)
+        if len(observe_window) >= 1: observe_window[0] = response
+        if len(observe_window) >= 2:
+            if (time.time()-observe_window[1]) > watch_dog_patience:
+                raise RuntimeError("Program terminated.")
+    return response
+
+
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+    """
+    Single-threaded method.
+    See request_llm/bridge_all.py for the function's documentation.
+    """
+    chatbot.append((inputs, ""))
+
+    global rwkv_glm_handle
+    if rwkv_glm_handle is None:
+        rwkv_glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, load_message + "\n\n" + rwkv_glm_handle.info)
+        yield from update_ui(chatbot=chatbot, history=[])
+        if not rwkv_glm_handle.success:
+            rwkv_glm_handle = None
+            return
+
+    if additional_fn is not None:
+        import core_functional
+        importlib.reload(core_functional)    # hot-reload the prompt definitions
+        core_functional = core_functional.get_core_functions()
+        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function, if any
+        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
+
+    # Rebuild the history as [user, assistant] pairs
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]])
+
+    # Start receiving the reply from jittorllms
+    response = "[Local Message]: Waiting for jittorllms response ..."
+    for response in rwkv_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)
+
+    # Wrap up the output
+    if response == "[Local Message]: Waiting for jittorllms response ...":
+        response = "[Local Message]: jittorllms response error ..."
+    history.extend([inputs, response])
+    yield from update_ui(chatbot=chatbot, history=history)
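
For reference, a hypothetical direct call to the multithreaded entry point above (in practice request_llm/bridge_all.py dispatches here). This sketch assumes the jittorllms dependencies are installed, and passes only the llm_kwargs keys the function actually reads; all values are illustrative.

# Hypothetical direct invocation; values are illustrative.
import time
from request_llm.bridge_jittorllms_rwkv import predict_no_ui_long_connection

llm_kwargs = {'max_length': 2048, 'top_p': 0.9, 'temperature': 0.7}
# observe_window[0] receives the partial output; observe_window[1] is a
# heartbeat timestamp. Nothing in this sketch refreshes it, so a generation
# longer than the 5 s watchdog patience would raise RuntimeError unless a
# supervising thread keeps updating observe_window[1].
observe_window = ["", time.time()]
answer = predict_no_ui_long_connection(
    inputs="What is Jittor?",
    llm_kwargs=llm_kwargs,
    history=[],
    sys_prompt="You are a helpful assistant.",
    observe_window=observe_window,
)
print(answer)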