forked from teilomillet/gollm
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathprompt_optimization.go
133 lines (113 loc) · 3.56 KB
/
prompt_optimization.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
// File: gollm/prompt_optimization.go
package gollm
import (
"context"
"fmt"
"sync"
"time"
)
// OptimizationConfig holds the configuration for a single prompt
// optimization run (consumed by OptimizePrompt).
type OptimizationConfig struct {
	Prompt       string        // prompt text to optimize
	Description  string        // human-readable description of the prompt's task; used to build the optimization goal
	Metrics      []Metric      // metrics used to rate candidate prompts
	RatingSystem string        // rating scheme identifier, e.g. "numerical"
	Threshold    float64       // score threshold for the optimizer — presumably the minimum acceptable rating; confirm against PromptOptimizer
	MaxRetries   int           // maximum number of optimization attempts
	RetryDelay   time.Duration // delay between optimization retries
}
// DefaultOptimizationConfig returns a baseline configuration for prompt
// optimization: numerical rating, a 0.8 threshold, three retries spaced
// two seconds apart, and a generic relevance/clarity/specificity metric set.
// Prompt and Description are left empty for the caller to fill in.
func DefaultOptimizationConfig() OptimizationConfig {
	defaults := OptimizationConfig{
		RatingSystem: "numerical",
		Threshold:    0.8,
		MaxRetries:   3,
		RetryDelay:   time.Second * 2,
	}
	defaults.Metrics = []Metric{
		{Name: "Relevance", Description: "How relevant the prompt is to the task"},
		{Name: "Clarity", Description: "How clear and unambiguous the prompt is"},
		{Name: "Specificity", Description: "How specific and detailed the prompt is"},
	}
	return defaults
}
// BatchPromptOptimizer handles batch optimization of prompts,
// running one optimization per example concurrently (see OptimizePrompts).
type BatchPromptOptimizer struct {
	LLM     LLM  // language model used for both optimization and generation
	Verbose bool // when true, each optimized prompt is printed to stdout
}
// NewBatchPromptOptimizer creates a BatchPromptOptimizer backed by the
// given LLM. Verbose is left at its zero value (false); set it on the
// returned struct to enable per-prompt logging.
func NewBatchPromptOptimizer(llm LLM) *BatchPromptOptimizer {
	optimizer := &BatchPromptOptimizer{LLM: llm}
	return optimizer
}
// PromptExample represents a single prompt to be optimized as part of a batch.
type PromptExample struct {
	Name        string   // label used to identify this example in results and logs
	Prompt      string   // prompt text to optimize
	Description string   // description of the prompt's task
	Metrics     []Metric // metrics used to rate candidate prompts for this example
	Threshold   float64  // score threshold passed through to the optimizer
}
// OptimizationResult represents the result of a single prompt optimization.
// When Error is non-nil, OptimizedPrompt and GeneratedContent may be empty
// (see OptimizePrompt for which stage produced which value).
type OptimizationResult struct {
	Name             string // copied from the source PromptExample.Name
	OriginalPrompt   string // the prompt as submitted
	OptimizedPrompt  string // the prompt after optimization
	GeneratedContent string // LLM response generated from the optimized prompt
	Error            error  // per-example failure; the batch itself never aborts
}
// OptimizePrompts optimizes a batch of prompts concurrently.
//
// One goroutine is started per example (the batch is unbounded — callers
// with very large batches should chunk the input). Results are returned
// in the same order as examples; a failure for one example is recorded
// in its OptimizationResult.Error and does not abort the batch.
// When bpo.Verbose is set, each optimized prompt is printed to stdout
// (interleaving across goroutines is unordered).
func (bpo *BatchPromptOptimizer) OptimizePrompts(ctx context.Context, examples []PromptExample) []OptimizationResult {
	results := make([]OptimizationResult, len(examples))
	var wg sync.WaitGroup
	for i, example := range examples {
		wg.Add(1)
		go func(i int, example PromptExample) {
			defer wg.Done()
			// Start from the package defaults rather than re-stating the
			// default rating system / retry values inline, so this path
			// cannot drift from DefaultOptimizationConfig, then overlay
			// the per-example fields.
			config := DefaultOptimizationConfig()
			config.Prompt = example.Prompt
			config.Description = example.Description
			config.Metrics = example.Metrics
			config.Threshold = example.Threshold
			optimizedPrompt, response, err := OptimizePrompt(ctx, bpo.LLM, config)
			// Each goroutine writes only its own index, so no lock is needed.
			results[i] = OptimizationResult{
				Name:             example.Name,
				OriginalPrompt:   example.Prompt,
				OptimizedPrompt:  optimizedPrompt,
				GeneratedContent: response,
				Error:            err,
			}
			if bpo.Verbose {
				fmt.Printf("Optimized prompt for %s: %s\n", example.Name, optimizedPrompt)
			}
		}(i, example)
	}
	wg.Wait()
	return results
}
// OptimizePrompt optimizes the given prompt and then generates a response
// from the optimized version using the same LLM.
//
// It returns the optimized prompt, the generated content, and any error
// from either stage. On an optimization failure both strings are empty;
// on a generation failure the optimized prompt is still returned so the
// caller can inspect or reuse it.
func OptimizePrompt(ctx context.Context, llm LLM, config OptimizationConfig) (optimizedPrompt string, response string, err error) {
	goal := fmt.Sprintf("Optimize the prompt for %s", config.Description)
	optimizer := NewPromptOptimizer(llm, config.Prompt, config.Description,
		WithCustomMetrics(config.Metrics...),
		WithRatingSystem(config.RatingSystem),
		WithOptimizationGoal(goal),
		WithMaxRetries(config.MaxRetries),
		WithRetryDelay(config.RetryDelay),
		WithThreshold(config.Threshold),
	)

	optimizedPrompt, err = optimizer.OptimizePrompt(ctx)
	if err != nil {
		return "", "", fmt.Errorf("optimization failed: %w", err)
	}

	response, err = llm.Generate(ctx, NewPrompt(optimizedPrompt))
	if err != nil {
		return optimizedPrompt, "", fmt.Errorf("response generation failed: %w", err)
	}
	return optimizedPrompt, response, nil
}