
LangChain Go Quick Start Guide

Free content: Fundamentals

LangChain Go is a powerful framework for building applications on top of large language models. It provides a complete toolchain that lets developers easily:

  • 🔗 Connect to a variety of large language models (OpenAI, Anthropic, local models, and more)
  • 🛠️ Create complex workflows and chained calls
  • 💾 Manage conversation memory and context
  • 🔍 Integrate vector databases for knowledge retrieval
  • 🤖 Build intelligent agents with tool calling

Environment Setup

First, make sure your environment is set up:

# Check that Go 1.19+ is installed
go version

# Create a new project
mkdir langchain-demo
cd langchain-demo
go mod init langchain-demo

# Install LangChain Go
go get github.com/tmc/langchaingo

Core Component Architecture

LangChain Go's architecture follows a modular design and consists of the following core components:

1. LLM Abstraction Layer

type LLM interface {
    Call(ctx context.Context, prompt string, options ...CallOption) (string, error)
    Generate(ctx context.Context, prompts []string, options ...CallOption) ([]*Generation, error)
}
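
For example, the OpenAI client in llms/openai satisfies this interface. A minimal sketch of calling it (assumes OPENAI_API_KEY is set in your environment; details may differ slightly across langchaingo versions):

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/tmc/langchaingo/llms/openai"
)

func main() {
    // openai.New() reads OPENAI_API_KEY from the environment by default.
    llm, err := openai.New()
    if err != nil {
        log.Fatal(err)
    }

    // Call sends a single prompt and returns the completion text.
    // (Newer langchaingo versions prefer GenerateContent, but Call
    // still works for simple single-prompt use.)
    completion, err := llm.Call(context.Background(), "Describe the Go language in one sentence.")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(completion)
}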

2. Prompt Templates

package main

import (
    "fmt"

    "github.com/tmc/langchaingo/prompts"
)

func main() {
    template := prompts.NewPromptTemplate(
        "You are a {{.role}} expert. Please answer this question about {{.topic}}: {{.question}}",
        []string{"role", "topic", "question"},
    )

    prompt, err := template.Format(map[string]any{
        "role":     "Go",
        "topic":    "concurrency",
        "question": "How do I use channels correctly?",
    })
    if err != nil {
        panic(err)
    }

    fmt.Println(prompt)
    // Output: You are a Go expert. Please answer this question about concurrency: How do I use channels correctly?
}

3. Memory Management

package main

import (
    "context"
    "fmt"

    "github.com/tmc/langchaingo/memory"
)

func main() {
    ctx := context.Background()

    // Create a conversation buffer memory.
    conversationMemory := memory.NewConversationBuffer()

    // Add conversation history. In recent langchaingo versions these
    // methods take a context and return an error.
    _ = conversationMemory.ChatHistory.AddUserMessage(ctx, "Hello")
    _ = conversationMemory.ChatHistory.AddAIMessage(ctx, "Hello! I'm an AI assistant. How can I help you?")

    // List the variables this memory exposes to a chain.
    memoryVariables := conversationMemory.MemoryVariables(ctx)
    fmt.Println("Memory variables:", memoryVariables)
}

A Practical Example

Let's build a simple chatbot:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/tmc/langchaingo/chains"
    "github.com/tmc/langchaingo/llms/openai"
    "github.com/tmc/langchaingo/memory"
)

func main() {
    // Initialize the OpenAI LLM (reads OPENAI_API_KEY from the environment).
    llm, err := openai.New()
    if err != nil {
        log.Fatal(err)
    }

    // Create conversation memory (named buf so it doesn't shadow the memory package).
    buf := memory.NewConversationBuffer()

    // Create a conversation chain.
    conversation := chains.NewConversation(llm, buf)

    // Start chatting. chains.Run feeds a single string input to the chain.
    ctx := context.Background()

    response1, err := chains.Run(ctx, conversation, "I want to learn Go. Do you have any advice?")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("AI:", response1)

    response2, err := chains.Run(ctx, conversation, "What about concurrent programming?")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("AI:", response2)
}

Advanced Chain Operations

Sequential Chains

package main

import (
    "log"

    "github.com/tmc/langchaingo/chains"
    "github.com/tmc/langchaingo/llms/openai"
    "github.com/tmc/langchaingo/prompts"
)

func createAnalysisChain() *chains.SequentialChain {
    llm, err := openai.New()
    if err != nil {
        log.Fatal(err)
    }

    // Step 1: code analysis.
    analysisTemplate := prompts.NewPromptTemplate(
        "Analyze the functionality and potential problems of the following Go code:\n{{.code}}\n\nAnalysis:",
        []string{"code"},
    )
    analysisChain := chains.NewLLMChain(llm, analysisTemplate)
    // Route this step's output to the "analysis" input of the next step
    // (the LLMChain default output key is "text").
    analysisChain.OutputKey = "analysis"

    // Step 2: optimization suggestions.
    optimizationTemplate := prompts.NewPromptTemplate(
        "Based on the following code analysis, provide concrete optimization suggestions:\n{{.analysis}}\n\nSuggestions:",
        []string{"analysis"},
    )
    optimizationChain := chains.NewLLMChain(llm, optimizationTemplate)
    optimizationChain.OutputKey = "optimization"

    // Combine the steps into a sequential chain. NewSequentialChain
    // validates the keys and returns an error if they don't line up.
    seq, err := chains.NewSequentialChain(
        []chains.Chain{analysisChain, optimizationChain},
        []string{"code"},
        []string{"analysis", "optimization"},
    )
    if err != nil {
        log.Fatal(err)
    }
    return seq
}
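
To run the chain, pass the initial inputs with chains.Call. A sketch in the same file (add context and fmt to the imports; the map keys match the chain built above):

func main() {
    chain := createAnalysisChain()

    outputs, err := chains.Call(context.Background(), chain, map[string]any{
        "code": "func add(a, b int) int { return a + b }",
    })
    if err != nil {
        log.Fatal(err)
    }

    // The output keys declared in NewSequentialChain come back in the map.
    fmt.Println("Analysis:", outputs["analysis"])
    fmt.Println("Optimization:", outputs["optimization"])
}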

Custom Tool Development

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "time"

    "github.com/tmc/langchaingo/tools"
)

// Compile-time checks that both types satisfy the tools.Tool interface.
var (
    _ tools.Tool = (*WeatherTool)(nil)
    _ tools.Tool = (*CodeExecutorTool)(nil)
)

// WeatherTool looks up the current weather for a city.
type WeatherTool struct {
    APIKey string
}

func (w *WeatherTool) Name() string {
    return "weather_query"
}

func (w *WeatherTool) Description() string {
    return "Look up the current weather for a city. Input: a city name (Chinese or English)."
}

func (w *WeatherTool) Call(ctx context.Context, input string) (string, error) {
    // A real implementation would call a weather API; this returns mock data.
    weatherData := map[string]interface{}{
        "city":        input,
        "temperature": "25°C",
        "condition":   "sunny",
        "humidity":    "60%",
        "time":        time.Now().Format("2006-01-02 15:04:05"),
    }

    result, err := json.MarshalIndent(weatherData, "", "  ")
    if err != nil {
        return "", err
    }

    return fmt.Sprintf("Weather for %s:\n%s", input, string(result)), nil
}

// CodeExecutorTool runs Go code snippets.
type CodeExecutorTool struct{}

func (c *CodeExecutorTool) Name() string {
    return "go_code_executor"
}

func (c *CodeExecutorTool) Description() string {
    return "Execute a Go code snippet and return the result. Input: a complete Go program."
}

func (c *CodeExecutorTool) Call(ctx context.Context, input string) (string, error) {
    // Note: in production, code must be executed inside a sandbox.
    // This example returns a mock result.
    return "Execution result:\n```\n// mock output\nprogram ran successfully\n```", nil
}
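
Because tools are plain Go types, they can be exercised directly before handing them to an agent, which makes unit testing straightforward. A quick smoke test in the same package (add log to the imports):

func main() {
    tool := &WeatherTool{APIKey: "your-api-key"}

    // Invoke the tool directly, exactly as an agent would.
    out, err := tool.Call(context.Background(), "Beijing")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out)
}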

Intelligent Agents

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/tmc/langchaingo/agents"
    "github.com/tmc/langchaingo/chains"
    "github.com/tmc/langchaingo/llms/openai"
    "github.com/tmc/langchaingo/tools"
)

func createSmartAgent() *agents.Executor {
    llm, err := openai.New()
    if err != nil {
        log.Fatal(err)
    }

    // Assemble the tool set.
    toolsList := []tools.Tool{
        &WeatherTool{APIKey: "your-api-key"},
        &CodeExecutorTool{},
        tools.Calculator{}, // built-in calculator tool
    }

    // Create a ZeroShot ReAct agent and its executor in one step.
    // (Exact signatures vary slightly across langchaingo versions.)
    executor, err := agents.Initialize(
        llm,
        toolsList,
        agents.ZeroShotReactDescription,
        agents.WithMaxIterations(5),
    )
    if err != nil {
        log.Fatal(err)
    }
    return executor
}

func main() {
    executor := createSmartAgent()
    ctx := context.Background()

    // A compound query that requires several tools.
    result, err := chains.Run(ctx, executor,
        "Look up the weather in Beijing, then convert 25°C to Fahrenheit.")
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println("Agent answer:", result)
}

Vector Database Integration

Document Loading and Vectorization

package main

import (
    "context"
    "log"
    "net/url"
    "os"

    "github.com/tmc/langchaingo/documentloaders"
    "github.com/tmc/langchaingo/embeddings"
    "github.com/tmc/langchaingo/llms/openai"
    "github.com/tmc/langchaingo/textsplitter"
    "github.com/tmc/langchaingo/vectorstores/qdrant"
)

func buildKnowledgeBase() {
    ctx := context.Background()

    // 1. Load the document. NewText takes an io.Reader, so open the file first.
    f, err := os.Open("./docs/golang-tutorial.md")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    loader := documentloaders.NewText(f)

    // 2. Split the text into overlapping chunks.
    splitter := textsplitter.NewRecursiveCharacter(
        textsplitter.WithChunkSize(1000),
        textsplitter.WithChunkOverlap(200),
    )
    splits, err := loader.LoadAndSplit(ctx, splitter)
    if err != nil {
        log.Fatal(err)
    }

    // 3. Create the embedding model from an OpenAI client.
    llm, err := openai.New()
    if err != nil {
        log.Fatal(err)
    }
    embedder, err := embeddings.NewEmbedder(llm)
    if err != nil {
        log.Fatal(err)
    }

    // 4. Connect to the vector database (qdrant.WithURL takes a url.URL).
    qdrantURL, err := url.Parse("http://localhost:6333")
    if err != nil {
        log.Fatal(err)
    }
    store, err := qdrant.New(
        qdrant.WithURL(*qdrantURL),
        qdrant.WithCollectionName("golang_knowledge"),
        qdrant.WithEmbedder(embedder),
    )
    if err != nil {
        log.Fatal(err)
    }

    // 5. Store the document vectors.
    if _, err := store.AddDocuments(ctx, splits); err != nil {
        log.Fatal(err)
    }

    log.Println("Knowledge base built!")
}
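
Once the documents are indexed, retrieval is a similarity search against the same store. A minimal sketch (assumes the vectorstores and schema packages are added to the imports above; the helper name is illustrative):

// searchKnowledgeBase returns the chunks most similar to the query.
func searchKnowledgeBase(ctx context.Context, store vectorstores.VectorStore, query string) ([]schema.Document, error) {
    // Ask the store for the top 3 matching chunks.
    return store.SimilaritySearch(ctx, query, 3)
}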

Microservice Architecture

Service Layer Design

package main

import (
    "context"
    "log"
    "net/http"
    "sync"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/tmc/langchaingo/llms/openai"
)

// LLMPool is a fixed-size pool of LLM clients.
type LLMPool struct {
    pool chan *openai.LLM
}

func NewLLMPool(size int) *LLMPool {
    pool := make(chan *openai.LLM, size)

    for i := 0; i < size; i++ {
        llm, err := openai.New()
        if err != nil {
            log.Fatal(err)
        }
        pool <- llm
    }

    return &LLMPool{pool: pool}
}

// Get blocks until a client is available.
func (p *LLMPool) Get() *openai.LLM {
    return <-p.pool
}

func (p *LLMPool) Put(llm *openai.LLM) {
    p.pool <- llm
}

// RateLimiter enforces a fixed-window per-user request limit.
type RateLimiter struct {
    requests map[string]*UserLimit
    mu       sync.Mutex
}

type UserLimit struct {
    count     int
    resetTime time.Time
}

func NewRateLimiter() *RateLimiter {
    return &RateLimiter{
        requests: make(map[string]*UserLimit),
    }
}

func (rl *RateLimiter) Allow(userID string, limit int, window time.Duration) bool {
    rl.mu.Lock()
    defer rl.mu.Unlock()

    now := time.Now()
    userLimit, exists := rl.requests[userID]

    // First request, or the window has expired: start a new window.
    if !exists || now.After(userLimit.resetTime) {
        rl.requests[userID] = &UserLimit{
            count:     1,
            resetTime: now.Add(window),
        }
        return true
    }

    if userLimit.count >= limit {
        return false
    }

    userLimit.count++
    return true
}

// LangChainService is the HTTP-facing service.
type LangChainService struct {
    llmPool     *LLMPool
    rateLimiter *RateLimiter
}

func NewLangChainService() *LangChainService {
    return &LangChainService{
        llmPool:     NewLLMPool(10), // 10 pooled clients
        rateLimiter: NewRateLimiter(),
    }
}

func (s *LangChainService) HandleQuery(c *gin.Context) {
    userID := c.GetHeader("X-User-ID")
    if userID == "" {
        c.JSON(http.StatusUnauthorized, gin.H{"error": "Missing user ID"})
        return
    }

    // Rate limit: at most 20 requests per minute per user.
    if !s.rateLimiter.Allow(userID, 20, time.Minute) {
        c.JSON(http.StatusTooManyRequests, gin.H{"error": "Rate limit exceeded"})
        return
    }

    var request struct {
        Query string `json:"query"`
    }

    if err := c.ShouldBindJSON(&request); err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
        return
    }

    // Borrow an LLM client from the pool; return it when done.
    llm := s.llmPool.Get()
    defer s.llmPool.Put(llm)

    // Process the query with a 30-second timeout.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    start := time.Now()
    response, err := llm.Call(ctx, request.Query)
    duration := time.Since(start)

    if err != nil {
        log.Printf("LLM call failed: %v, took: %v", err, duration)
        c.JSON(http.StatusInternalServerError, gin.H{"error": "LLM call failed"})
        return
    }

    log.Printf("LLM call succeeded, user: %s, took: %v", userID, duration)

    c.JSON(http.StatusOK, gin.H{
        "response": response,
        "duration": duration.Milliseconds(),
    })
}
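
A minimal main ties the service to a gin router (a sketch; the /query route name is illustrative, and the monitoring section below adds middleware to this router):

func main() {
    service := NewLangChainService()

    router := gin.Default()
    router.POST("/query", service.HandleQuery)

    // Listen on :8080, matching the port used in the deployment configs below.
    if err := router.Run(":8080"); err != nil {
        log.Fatal(err)
    }
}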

Monitoring and Metrics

package main

import (
    "time"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var (
    // Request counter.
    // Note: per-user labels can explode cardinality; prefer coarser
    // labels in production.
    requestsTotal = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Name: "langchain_requests_total",
            Help: "The total number of processed requests",
        },
        []string{"user_id", "status"},
    )

    // Response-time histogram.
    requestDuration = promauto.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "langchain_request_duration_seconds",
            Help:    "The request duration in seconds",
            Buckets: prometheus.DefBuckets,
        },
        []string{"user_id"},
    )

    // LLM pool utilization, updated by the pool when clients are
    // borrowed and returned (not shown).
    llmPoolUsage = promauto.NewGauge(
        prometheus.GaugeOpts{
            Name: "langchain_llm_pool_usage",
            Help: "Current usage of LLM pool",
        },
    )
)

// metricsMiddleware records request counts and latencies.
func metricsMiddleware() gin.HandlerFunc {
    return func(c *gin.Context) {
        start := time.Now()
        userID := c.GetHeader("X-User-ID")

        c.Next()

        duration := time.Since(start)
        status := "success"
        if c.Writer.Status() >= 400 {
            status = "error"
        }

        requestsTotal.WithLabelValues(userID, status).Inc()
        requestDuration.WithLabelValues(userID).Observe(duration.Seconds())
    }
}
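
These metrics still have to be exposed for Prometheus to scrape. A minimal sketch that mounts the standard promhttp handler on the gin router (add github.com/prometheus/client_golang/prometheus/promhttp to the imports; the route layout mirrors the service above):

func setupRouter(service *LangChainService) *gin.Engine {
    router := gin.Default()
    router.Use(metricsMiddleware())

    // Business endpoint.
    router.POST("/query", service.HandleQuery)

    // Prometheus scrapes this endpoint; gin.WrapH adapts the standard http.Handler.
    router.GET("/metrics", gin.WrapH(promhttp.Handler()))

    return router
}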

Deployment Configuration

Docker Compose Configuration

version: '3.8'

services:
  langchain-api:
    build: .
    ports:
      - "8080:8080"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - REDIS_URL=redis:6379
      - DB_URL=postgres://user:pass@postgres:5432/langchain
    depends_on:
      - redis
      - postgres
      - qdrant
    deploy:
      replicas: 3
      resources:
        limits:
          memory: 1G
          cpus: '0.5'

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"

  postgres:
    image: postgres:15
    environment:
      POSTGRES_DB: langchain
      POSTGRES_USER: user
      POSTGRES_PASSWORD: pass
    volumes:
      - postgres_data:/var/lib/postgresql/data

  qdrant:
    image: qdrant/qdrant:latest
    ports:
      - "6333:6333"

  prometheus:
    image: prom/prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml

  grafana:
    image: grafana/grafana
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin

volumes:
  postgres_data:
  qdrant_data:
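
The compose file mounts a local prometheus.yml; a minimal scrape configuration for it might look like this (the job name and scrape interval are illustrative):

global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'langchain-api'
    # Scrape the /metrics endpoint exposed by the gin service.
    static_configs:
      - targets: ['langchain-api:8080']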

Kubernetes Deployment Manifest

apiVersion: apps/v1
kind: Deployment
metadata:
  name: langchain-api
spec:
  replicas: 5
  selector:
    matchLabels:
      app: langchain-api
  template:
    metadata:
      labels:
        app: langchain-api
    spec:
      containers:
        - name: langchain-api
          image: langchain-api:latest
          ports:
            - containerPort: 8080
          env:
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: api-secrets
                  key: openai-key
          resources:
            requests:
              memory: "512Mi"
              cpu: "250m"
            limits:
              memory: "1Gi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: langchain-api-service
spec:
  selector:
    app: langchain-api
  ports:
    - port: 80
      targetPort: 8080
  type: LoadBalancer
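
The probes above assume the service exposes /health and /ready endpoints, so the gin router must actually serve them. A minimal sketch (the readiness logic is illustrative):

// Register the endpoints the Kubernetes probes call.
func registerProbes(router *gin.Engine) {
    router.GET("/health", func(c *gin.Context) {
        c.JSON(http.StatusOK, gin.H{"status": "ok"})
    })
    router.GET("/ready", func(c *gin.Context) {
        // In a real service, check downstream dependencies here
        // (database, Qdrant, OpenAI reachability) before reporting ready.
        c.JSON(http.StatusOK, gin.H{"status": "ready"})
    })
}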

Appendix: Companion Resources

All of the example code can be found in the GitHub repository.

Quick Start Template

# Clone the starter template
git clone https://github.com/coding520/langchain-go-starter
cd langchain-go-starter

# Install dependencies
go mod tidy

# Set environment variables
export OPENAI_API_KEY="your-api-key"

# Run the example
go run main.go

Suggested Learning Path

  1. Foundation: LLM calls, prompt templates, memory management
  2. Intermediate: chain operations, tool integration, agent development
  3. Advanced: vector databases, RAG systems, performance optimization
  4. Enterprise: microservice architecture, monitoring and deployment, production operations

Each stage has corresponding paid content and hands-on projects to help you master the complete LangChain Go skill set step by step.