diff --git a/README.md b/README.md
index 3cfc9466..219aded0 100644
--- a/README.md
+++ b/README.md
@@ -6,8 +6,11 @@
> **启航电商ERP系统3.0版本在原版本基础上新增自动任务拉取订单、订单自动导入订单库等重大升级,进一步提升了开箱即用的特性。**
+> **启航电商ERP系统正在重构为AI原生ERP系统。**
## 一、系统介绍
+**启航电商ERP系统正在重构为AI原生ERP系统。**
+
#### 项目定位
启航电商ERP系统是一个驱动电商企业数字化转型的电商业务中台系统底座。
diff --git a/api/ai-agent/pom.xml b/api/ai-agent/pom.xml
index d4e9568d..68341d95 100644
--- a/api/ai-agent/pom.xml
+++ b/api/ai-agent/pom.xml
@@ -142,7 +142,18 @@
-
+
+
+ dev.langchain4j
+ langchain4j
+ 1.11.0
+
+
+
+ dev.langchain4j
+ langchain4j-ollama
+ 1.11.0
+
@@ -153,4 +164,4 @@
-
+
\ No newline at end of file
diff --git a/api/ai-agent/src/main/java/cn/qihangerp/erp/controller/SseController.java b/api/ai-agent/src/main/java/cn/qihangerp/erp/controller/SseController.java
index 0da6114e..2afbfa3f 100644
--- a/api/ai-agent/src/main/java/cn/qihangerp/erp/controller/SseController.java
+++ b/api/ai-agent/src/main/java/cn/qihangerp/erp/controller/SseController.java
@@ -64,13 +64,13 @@ public class SseController {
}
@GetMapping("/send")
- public String sendMessage(@RequestParam String clientId, @RequestParam String message) {
+ public String sendMessage(@RequestParam String clientId, @RequestParam String message, @RequestParam(required = false, defaultValue = "llama3") String model) {
log.info("=============来新消息了!");
SseEmitter emitter = emitters.get(clientId);
if (emitter != null) {
try {
- // 使用AiService处理消息
- String response = aiService.processMessage(message);
+ // 使用AiService处理消息,传递模型参数
+ String response = aiService.processMessage(message, model);
emitter.send(SseEmitter.event()
.name("message")
diff --git a/api/ai-agent/src/main/java/cn/qihangerp/erp/serviceImpl/AiService.java b/api/ai-agent/src/main/java/cn/qihangerp/erp/serviceImpl/AiService.java
index af8998e4..165ecc9b 100644
--- a/api/ai-agent/src/main/java/cn/qihangerp/erp/serviceImpl/AiService.java
+++ b/api/ai-agent/src/main/java/cn/qihangerp/erp/serviceImpl/AiService.java
@@ -1,67 +1,45 @@
package cn.qihangerp.erp.serviceImpl;
-import cn.qihangerp.common.ResultVo;
-import com.alibaba.fastjson2.JSONObject;
+import dev.langchain4j.model.ollama.OllamaChatModel;
import org.springframework.stereotype.Service;
-import org.springframework.util.StringUtils;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.http.HttpClient;
-import java.net.http.HttpRequest;
-import java.net.http.HttpResponse;
+import java.time.Duration;
/**
- * AI服务类,直接调用Ollama API处理聊天内容
+ * AI服务类,使用LangChain4J调用Ollama模型处理聊天内容
*/
@Service
public class AiService {
- private final HttpClient httpClient;
- private final String ollamaUrl;
-
- public AiService() {
- this.httpClient = HttpClient.newHttpClient();
- this.ollamaUrl = "http://localhost:11434/api/generate";
+ /**
+ * 处理聊天消息
+ * @param message 用户消息
+ * @param model 模型名称
+ * @return AI回复
+ */
+ public String processMessage(String message, String model) {
+ try {
+ // 根据模型名称创建对应的ChatModel
+ OllamaChatModel modelInstance = OllamaChatModel.builder()
+ .baseUrl("http://localhost:11434") // Ollama默认端口
+ .modelName(model) // 使用指定的模型
+ .temperature(0.7)
+ .timeout(Duration.ofSeconds(300)) // 超时时间设置为300秒(5分钟)
+ .build();
+
+ // 调用Ollama模型获取回复
+ return modelInstance.chat(message);
+ } catch (Exception e) {
+ e.printStackTrace();
+ return "错误: " + e.getMessage();
+ }
}
/**
- * 处理聊天消息
+ * 处理聊天消息(使用默认模型)
* @param message 用户消息
* @return AI回复
*/
public String processMessage(String message) {
- try {
- // 构建请求体
- JSONObject requestBody = new JSONObject();
- requestBody.put("model", "llama3");
- requestBody.put("prompt", message);
- requestBody.put("stream", false);
- requestBody.put("temperature", 0.7);
-
- // 创建HTTP请求
- HttpRequest request = HttpRequest.newBuilder()
- .uri(URI.create(ollamaUrl))
- .header("Content-Type", "application/json")
- .POST(HttpRequest.BodyPublishers.ofString(requestBody.toJSONString()))
- .build();
-
- // 发送请求并获取响应
- HttpResponse response = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
-
- // 解析响应
- JSONObject responseBody = JSONObject.parseObject(response.body());
-
- // 检查是否有错误
- if (responseBody.containsKey("error")) {
- String errorMessage = responseBody.getString("error");
- return "错误: " + errorMessage;
- }
-
- return responseBody.getString("response");
- } catch (IOException | InterruptedException e) {
- e.printStackTrace();
- return "抱歉,我暂时无法处理您的请求,请稍后重试。";
- }
+ return processMessage(message, "llama3");
}
}
diff --git a/docs/preview.png b/docs/preview.png
index b046d2ed..456bb9a2 100644
Binary files a/docs/preview.png and b/docs/preview.png differ
diff --git a/vue/src/views/index.vue b/vue/src/views/index.vue
index a231b9f0..9ba39f43 100644
--- a/vue/src/views/index.vue
+++ b/vue/src/views/index.vue
@@ -8,6 +8,13 @@
工作助手
@@ -166,7 +173,8 @@ export default {
sse: null,
clientId: '',
isSseConnected: false,
- isLoading: false
+ isLoading: false,
+ selectedModel: 'qwen3.5:2b'
}
},
mounted() {
@@ -259,13 +267,21 @@ export default {
// 通过SSE发送消息到后端
if (this.isSseConnected) {
+ // 使用AbortController实现超时
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), 300000); // 300秒超时
+
// 使用fetch发送消息
- fetch(`${process.env.VUE_APP_BASE_API}/api/ai-agent/sse/send?clientId=${this.clientId}&message=${encodeURIComponent(this.inputMessage)}&token=${token}`)
+ fetch(`${process.env.VUE_APP_BASE_API}/api/ai-agent/sse/send?clientId=${this.clientId}&message=${encodeURIComponent(this.inputMessage)}&model=${this.selectedModel}&token=${token}`, {
+ signal: controller.signal
+ })
.then(response => response.text())
.then(data => {
+ clearTimeout(timeoutId);
console.log('消息发送结果:', data);
})
.catch(error => {
+ clearTimeout(timeoutId);
console.error('消息发送失败:', error);
// 发送失败时使用模拟回复
this.generateReply(this.inputMessage);