Skip to content

Commit

Permalink
docs(README): update
Browse files Browse the repository at this point in the history
  • Loading branch information
tpoisonooo committed Jan 8, 2024
1 parent 110e32a commit 7f34008
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 27 deletions.
17 changes: 12 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ reject query: 茴香豆是怎么做的

茴香豆使用了搜索引擎,点击 [serper 官网](https://serper.dev/api-key)获取限额 WEB_SEARCH_TOKEN,填入 `config.ini`

```shell
```bash
# config.ini
..
[web_search]
Expand Down Expand Up @@ -90,7 +90,7 @@ x_api_key = "${YOUR-X-API-KEY}"

点击[创建飞书自定义机器人](https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot),获取回调 WEBHOOK_URL,填写到 config.ini

```shell
```bash
# config.ini
..
[frontend]
Expand All @@ -99,7 +99,7 @@ webhook_url = "${YOUR-LARK-WEBHOOK-URL}"
```

运行。结束后,技术助手的答复将发送到飞书群。
```shell
```bash
python3 main.py workdir
```
<img src="./resource/figures/lark-example.png" width="400">
Expand Down Expand Up @@ -200,5 +200,12 @@ python3 main.py workdir
此时无法运行 local LLM,只能用 remote LLM 配合 text2vec 执行 pipeline。请确保 `config.ini` 只使用 remote LLM,关闭 local LLM
# 📝 License
项目使用 [GPL 3-License](./LICENSE)
# 📝 引用
```bibtex
@misc{2024HuixiangDou,
title={HuixiangDou: Overcoming Group Chat Scenarios with LLM-based Technical Assistance},
author={HuixiangDou Contributors},
howpublished = {\url{https://github.com/internlm/huixiangdou}},
year={2023}
}
```
11 changes: 9 additions & 2 deletions README_en.md
Original file line number Diff line number Diff line change
Expand Up @@ -198,5 +198,12 @@ In order to further improve the assistant's answering experience, the more of th
In this case, you can't run local LLM, only remote LLM combined with text2vec to execute pipeline. Make sure `config.ini` only uses remote LLM, and turn off local LLM.

# 📝 License
The project uses the [GPL 3 License](./LICENSE).
# 📝 Reference
```bibtex
@misc{2024HuixiangDou,
title={HuixiangDou: Overcoming Group Chat Scenarios with LLM-based Technical Assistance},
author={HuixiangDou Contributors},
howpublished = {\url{https://github.com/internlm/huixiangdou}},
year={2023}
}
```
26 changes: 6 additions & 20 deletions config.ini
Original file line number Diff line number Diff line change
@@ -1,34 +1,24 @@
[feature_store]
reject_throttle = 767.0
# text2vec model path, support local relative path and huggingface model format
model_path = "shibing624/text2vec-base-chinese"
model_path = "../models/text2vec-large-chinese"
work_dir = "workdir"

[web_search]
# check https://serper.dev/api-key to get a free API key
x_api_key = "${YOUR-API-KEY}"
x_api_key = "aa3da0cd69c5a2df7c0b664dc8a4c118de532405"
domain_partial_order = ["openai.com", "pytorch.org", "readthedocs.io", "nvidia.com", "stackoverflow.com", "juejin.cn", "zhuanlan.zhihu.com", "www.cnblogs.com"]
save_dir = "logs/web_search_result"

[llm]
enable_local = 1
enable_remote = 0
# hybrid llm service address
client_url = "http://127.0.0.1:8888/inference"
client_url = "http://10.140.24.142:39999/inference"

[llm.server]
# local LLM configuration
# support "internlm2-7B", "internlm2-20B" and "internlm2-70B"
local_llm_path = "/internlm/ampere_7b_v1_7_0"
local_llm_max_text_length = 16000

# remote LLM service configuration
# support any python3 openai interface, such as "gpt", "kimi" and so on
remote_type = "kimi"
remote_api_key = "${YOUR-API-KEY}"
# max text length for remote LLM. for example, use 128000 for kimi, 192000 for gpt
remote_api_key = "Y2tpMG41dDB0YzExbjRqYW5nN2c6bXNrLTFzVlB2NGJRaDExeWdnNTlZY3dYMm5mcVRpWng="
remote_llm_max_text_length = 128000
# openai model type. use "moonshot-v1-128k" for kimi, "gpt-4" for gpt
remote_llm_model = "moonshot-v1-128k"
bind_port = 8888

Expand All @@ -43,9 +33,8 @@ has_weekday = 1

[sg_search]
binary_src_path = "/usr/local/bin/src"
src_access_token = "${YOUR-SRC-ACCESS-TOKEN}"
src_access_token = "sgp_636f79ad2075640f_3ef2a135579615403e29b88d4402f1e6183ad347"

# add your repo here, we just take opencompass and lmdeploy as example
[sg_search.opencompass]
github_repo_id = "open-compass/opencompass"
introduction = "用于评测大型语言模型(LLM). 它提供了完整的开源可复现的评测框架,支持大语言模型、多模态模型的一站式评测,基于分布式技术,对大参数量模型亦能实现高效评测。评测方向汇总为知识、语言、理解、推理、考试五大能力维度,整合集纳了超过70个评测数据集,合计提供了超过40万个模型评测问题,并提供长文本、安全、代码3类大模型特色技术能力评测。"
Expand All @@ -55,8 +44,5 @@ github_repo_id = "internlm/lmdeploy"
introduction = "lmdeploy 是一个用于压缩、部署和服务 LLM(Large Language Model)的工具包。是一个服务端场景下,transformer 结构 LLM 部署工具,支持 GPU 服务端部署,速度有保障,支持 Tensor Parallel,多并发优化,功能全面,包括模型转换、缓存历史会话的 cache feature 等. 它还提供了 WebUI、命令行和 gRPC 客户端接入。"

[frontend]
# chat group type, support "lark" and "none"
# check https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot to add lark bot
type = "none"
# chat group webhook url, send reply to group
webhook_url = "https://open.feishu.cn/open-apis/bot/v2/hook/7a5d3d98-fdfd-40f8-b8de-851cb7e81e5c"
webhook_url = "https://open.feishu.cn/open-apis/bot/v2/hook/7a5d3d98-fdfd-40f8-b8de-851cb7e81e5c"

0 comments on commit 7f34008

Please sign in to comment.