diff --git a/plugin/aichat/main.go b/plugin/aichat/main.go
index dbb9e9aa..fc78af58 100644
--- a/plugin/aichat/main.go
+++ b/plugin/aichat/main.go
@@ -2,6 +2,7 @@
 package aichat
 
 import (
+	"errors"
 	"math/rand"
 	"strconv"
 	"strings"
@@ -46,7 +47,9 @@ var (
 			"- 设置AI聊天(不)以AI语音输出\n" +
 			"- 查看AI聊天配置\n" +
 			"- 重置AI聊天\n" +
-			"- 群聊总结 [消息数目]|群聊总结 1000\n",
+			"- 群聊总结 [消息数目]|群聊总结 1000\n" +
+			"- /gpt [内容] (使用大模型聊天)\n",
 		PrivateDataFolder: "aichat",
 	})
 )
@@ -61,6 +64,32 @@ var (
 	limit = ctxext.NewLimiterManager(time.Second*30, 1)
 )
 
+// getModelParams returns the model parameters: temperature (float32(temp)/100), TopP and max length
+func getModelParams(temp int64) (temperature float32, topp float32, maxn uint) {
+	// temperature: fall back to 70 by default, clamp to at most 100
+	if temp <= 0 {
+		temp = 70 // default setting
+	}
+	if temp > 100 {
+		temp = 100
+	}
+	temperature = float32(temp) / 100
+
+	// TopP: fall back to 0.9 when unset
+	topp = cfg.TopP
+	if topp == 0 {
+		topp = 0.9
+	}
+
+	// max length: fall back to 4096 when unset
+	maxn = cfg.MaxN
+	if maxn == 0 {
+		maxn = 4096
+	}
+
+	return temperature, topp, maxn
+}
+
 func init() {
 	en.OnMessage(ensureconfig, func(ctx *zero.Ctx) bool {
 		return ctx.ExtractPlainText() != "" &&
@@ -88,39 +117,25 @@ func init() {
 			return
 		}
 
-		if temp <= 0 {
-			temp = 70 // default setting
-		}
-		if temp > 100 {
-			temp = 100
-		}
+		temperature, topp, maxn := getModelParams(temp)
 
 		x := deepinfra.NewAPI(cfg.API, cfg.Key)
 		var mod model.Protocol
 
-		maxn := cfg.MaxN
-		if maxn == 0 {
-			maxn = 4096
-		}
-		topp := cfg.TopP
-		if topp == 0 {
-			topp = 0.9
-		}
-
 		switch cfg.Type {
 		case 0:
 			mod = model.NewOpenAI(
 				cfg.ModelName, cfg.Separator,
-				float32(temp)/100, topp, maxn,
+				temperature, topp, maxn,
 			)
 		case 1:
 			mod = model.NewOLLaMA(
 				cfg.ModelName, cfg.Separator,
-				float32(temp)/100, topp, maxn,
+				temperature, topp, maxn,
 			)
 		case 2:
 			mod = model.NewGenAI(
 				cfg.ModelName,
-				float32(temp)/100, topp, maxn,
+				temperature, topp, maxn,
 			)
 		default:
 			logrus.Warnln("[aichat] unsupported AI type", cfg.Type)
@@ -319,6 +334,16 @@
 	// 添加群聊总结功能
 	en.OnRegex(`^群聊总结\s?(\d*)$`, ensureconfig, zero.OnlyGroup, zero.AdminPermission).SetBlock(true).Limit(limit.LimitByGroup).Handle(func(ctx *zero.Ctx) {
 		ctx.SendChain(message.Text("少女思考中..."))
+		gid := ctx.Event.GroupID
+		if gid == 0 {
+			gid = -ctx.Event.UserID
+		}
+		c, ok := ctx.State["manager"].(*ctrl.Control[*zero.Ctx])
+		if !ok {
+			return
+		}
+		rate := c.GetData(gid)
+		temp := (rate >> 8) & 0xff
 		p, _ := strconv.ParseInt(ctx.State["regex_matched"].([]string)[1], 10, 64)
 		if p > 1000 {
 			p = 1000
@@ -326,10 +351,9 @@
 		if p == 0 {
 			p = 200
 		}
-		gid := ctx.Event.GroupID
 		group := ctx.GetGroupInfo(gid, false)
 		if group.MemberCount == 0 {
-			ctx.SendChain(message.Text(zero.BotConfig.NickName[0], "未加入", group.Name, "(", gid, "),无法获取摘要"))
+			ctx.SendChain(message.Text(zero.BotConfig.NickName[0], "未加入", group.Name, "(", gid, "),无法获取总结"))
 			return
 		}
@@ -350,8 +374,13 @@
 			return
 		}
 
-		// 调用大模型API进行摘要
-		summary, err := summarizeMessages(messages)
+		// build the summarization prompt
+		summaryPrompt := "请总结这个群聊内容,要求按发言顺序梳理,明确标注每个发言者的昵称,并完整呈现其核心观点、提出的问题、发表的看法或做出的回应,确保不遗漏关键信息,且能体现成员间的对话逻辑和互动关系:\n" +
+			strings.Join(messages, "\n")
+
+		// call the LLM API to generate the summary
+		summary, err := llmchat(summaryPrompt, temp)
+
 		if err != nil {
 			ctx.SendChain(message.Text("ERROR: ", err))
 			return
@@ -367,13 +396,108 @@
 		b.WriteString(" 条消息总结:\n\n")
 		b.WriteString(summary)
 
-		// 分割总结内容为多段
-		parts := strings.Split(b.String(), "\n\n")
-		msg := make(message.Message, 0, len(parts))
-		for _, part := range parts {
-			if part != "" {
-				msg = append(msg, ctxext.FakeSenderForwardNode(ctx, message.Text(part)))
+		// split the summary into chunks of at most 1000 bytes
+		summaryText := b.String()
+		msg := make(message.Message, 0)
+		for len(summaryText) > 0 {
+			if len(summaryText) <= 1000 {
+				msg = append(msg, ctxext.FakeSenderForwardNode(ctx, message.Text(summaryText)))
+				break
 			}
+
+			// find the last newline within the 1000-byte window and prefer to split there
+			chunk := summaryText[:1000]
+			lastNewline := strings.LastIndex(chunk, "\n")
+			if lastNewline > 0 {
+				chunk = summaryText[:lastNewline+1]
+			}
+
+			msg = append(msg, ctxext.FakeSenderForwardNode(ctx, message.Text(chunk)))
+			summaryText = summaryText[len(chunk):]
+		}
+		if len(msg) > 0 {
+			ctx.Send(msg)
+		}
+	})
+
+	// /gpt command handler (supports both replied-to messages and inline content)
+	en.OnKeyword("/gpt", ensureconfig).SetBlock(true).Handle(func(ctx *zero.Ctx) {
+		gid := ctx.Event.GroupID
+		if gid == 0 {
+			gid = -ctx.Event.UserID
+		}
+		c, ok := ctx.State["manager"].(*ctrl.Control[*zero.Ctx])
+		if !ok {
+			return
+		}
+		rate := c.GetData(gid)
+		temp := (rate >> 8) & 0xff
+		text := ctx.MessageString()
+
+		var query string
+		var replyContent string
+
+		// check for a reply segment (via message elements rather than CQ codes)
+		for _, elem := range ctx.Event.Message {
+			if elem.Type == "reply" {
+				// extract the ID of the replied-to message
+				replyIDStr := elem.Data["id"]
+				replyID, err := strconv.ParseInt(replyIDStr, 10, 64)
+				if err == nil {
+					// fetch the content of the replied-to message
+					replyMsg := ctx.GetMessage(replyID)
+					if replyMsg.Elements != nil {
+						replyContent = replyMsg.Elements.ExtractPlainText()
+					}
+				}
+				break // stop after the first reply element
+			}
+		}
+
+		// extract the content after "/gpt"
+		parts := strings.SplitN(text, "/gpt", 2)
+
+		var gContent string
+		if len(parts) > 1 {
+			gContent = strings.TrimSpace(parts[1])
+		}
+
+		// combine: prefer the replied-to content, appending the /gpt content when both are present
+		switch {
+		case replyContent != "" && gContent != "":
+			query = replyContent + "\n" + gContent
+		case replyContent != "":
+			query = replyContent
+		case gContent != "":
+			query = gContent
+		default:
+			return
+		}
+
+		// call the LLM API for the chat reply
+		reply, err := llmchat(query, temp)
+		if err != nil {
+			ctx.SendChain(message.Text("ERROR: ", err))
+			return
+		}
+
+		// split the reply into chunks of at most 1000 bytes
+		msg := make(message.Message, 0)
+		for len(reply) > 0 {
+			if len(reply) <= 1000 {
+				msg = append(msg, ctxext.FakeSenderForwardNode(ctx, message.Text(reply)))
+				break
+			}
+
+			// find the last newline within the 1000-byte window and prefer to split there
+			chunk := reply[:1000]
+			lastNewline := strings.LastIndex(chunk, "\n")
+			if lastNewline > 0 {
+				chunk = reply[:lastNewline+1]
+			}
+
+			msg = append(msg, ctxext.FakeSenderForwardNode(ctx, message.Text(chunk)))
+			reply = reply[len(chunk):]
 		}
 		if len(msg) > 0 {
 			ctx.Send(msg)
@@ -381,20 +505,34 @@
 	})
 }
 
-// summarizeMessages 调用大模型API进行消息摘要
-func summarizeMessages(messages []string) (string, error) {
-	// 使用现有的AI配置进行摘要
+// llmchat wraps a single-turn request to the configured LLM API
+func llmchat(prompt string, temp int64) (string, error) {
+	temperature, topp, maxn := getModelParams(temp) // temp <= 0 falls back to the default of 70
+
 	x := deepinfra.NewAPI(cfg.API, cfg.Key)
-	mod := model.NewOpenAI(
-		cfg.ModelName, cfg.Separator,
-		float32(70)/100, 0.9, 4096,
-	)
+	var mod model.Protocol
+	switch cfg.Type {
+	case 0:
+		mod = model.NewOpenAI(
+			cfg.ModelName, cfg.Separator,
+			temperature, topp, maxn,
+		)
+	case 1:
+		mod = model.NewOLLaMA(
+			cfg.ModelName, cfg.Separator,
+			temperature, topp, maxn,
+		)
+	case 2:
+		mod = model.NewGenAI(
+			cfg.ModelName,
+			temperature, topp, maxn,
+		)
+	default:
+		logrus.Warnln("[aichat] unsupported AI type", cfg.Type)
+		return "", errors.New("不支持的AI类型")
+	}
 
-	// 构造摘要请求提示
-	summaryPrompt := "请总结这个群聊内容,要求按发言顺序梳理,明确标注每个发言者的昵称,并完整呈现其核心观点、提出的问题、发表的看法或做出的回应,确保不遗漏关键信息,且能体现成员间的对话逻辑和互动关系:\n\n" +
-		strings.Join(messages, "\n---\n")
-
-	data, err := x.Request(mod.User(summaryPrompt))
+	data, err := x.Request(mod.User(prompt))
 	if err != nil {
 		return "", err
 	}
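Review note: both chunking loops in this patch index the string by bytes (len and s[:1000] count bytes, not characters), so when no newline falls inside the window a cut can land in the middle of a multi-byte UTF-8 character and the chunk ends in mojibake. Below is a minimal sketch of a shared, rune-safe splitter the two handlers could call instead; the helper name splitChunks is hypothetical and not part of this patch, and maxBytes mirrors the patch's 1000-byte limit.

package aichat

import (
	"strings"
	"unicode/utf8"
)

// splitChunks splits s into pieces of at most maxBytes bytes each,
// preferring to cut at the last newline inside the window and
// backing up so a cut never lands inside a multi-byte UTF-8 rune.
func splitChunks(s string, maxBytes int) []string {
	chunks := make([]string, 0, len(s)/maxBytes+1)
	for len(s) > 0 {
		if len(s) <= maxBytes {
			chunks = append(chunks, s)
			break
		}
		cut := maxBytes
		// back up to a rune boundary (at most 3 bytes for valid UTF-8)
		for cut > 0 && !utf8.RuneStart(s[cut]) {
			cut--
		}
		// prefer the last newline inside the window, if any
		if i := strings.LastIndex(s[:cut], "\n"); i > 0 {
			cut = i + 1
		}
		if cut == 0 { // defensive: invalid UTF-8 at the front
			cut = maxBytes
		}
		chunks = append(chunks, s[:cut])
		s = s[cut:]
	}
	return chunks
}

Each handler would then reduce to a call like splitChunks(b.String(), 1000) followed by one ctxext.FakeSenderForwardNode append per chunk, removing the duplicated loop bodies.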