about summary refs log tree commit diff
path: root/cmd/sapientwindex
diff options
context:
space:
mode:
author    Xe Iaso <me@xeiaso.net>  2023-09-26 06:18:56 -0400
committer Xe Iaso <me@xeiaso.net>  2023-09-26 06:18:56 -0400
commite260e2b17bbecd34afb49c50cd7dbf4d3b43fed3 (patch)
tree64f9ab425c6037509ae13b9f1a7762837150c5c4 /cmd/sapientwindex
parent2654099b2645423152de4e790ea658ff0ab632e4 (diff)
downloadx-e260e2b17bbecd34afb49c50cd7dbf4d3b43fed3.tar.xz
x-e260e2b17bbecd34afb49c50cd7dbf4d3b43fed3.zip
cmd/sapientwindex: add reddit bot
Signed-off-by: Xe Iaso <me@xeiaso.net>
Diffstat (limited to 'cmd/sapientwindex')
-rw-r--r--  cmd/sapientwindex/foo.json                11
-rw-r--r--  cmd/sapientwindex/llama.go               112
-rw-r--r--  cmd/sapientwindex/main.go                163
-rw-r--r--  cmd/sapientwindex/prompts/helper.txt      11
-rw-r--r--  cmd/sapientwindex/prompts/moderation.txt   7
5 files changed, 304 insertions, 0 deletions
diff --git a/cmd/sapientwindex/foo.json b/cmd/sapientwindex/foo.json
new file mode 100644
index 0000000..3b01251
--- /dev/null
+++ b/cmd/sapientwindex/foo.json
@@ -0,0 +1,11 @@
+{
+ "temperature": 0.8,
+ "top_k": 40,
+ "top_p": 0.9,
+ "stream": false,
+ "prompt": "<s>[INST] <<SYS>>\nYou are an expert in creating tulpas, also known as tulpamancy. When you are given questions from users, you will answer questions in one paragraph like a redditor with casual language. ONLY reply in plain text. DO NOT return anything but your response. DO NOT use emoji.\n\nBegin your answer with ANSWER:\n<</SYS>>\nAnswer this question:\n\nHow I can understand that tulpa is really answering me and I'm not imagining his answer?\n\nSometimes I'm really not sure with this. Maybe someone can help me?\n[/INST]",
+ "repeat_penalty": 1.15,
+ "repeat_last_n": 512,
+ "mirostat": 2,
+ "n_predict": 2048
+}
diff --git a/cmd/sapientwindex/llama.go b/cmd/sapientwindex/llama.go
new file mode 100644
index 0000000..7895832
--- /dev/null
+++ b/cmd/sapientwindex/llama.go
@@ -0,0 +1,112 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "io"
+ "net/http"
+
+ "within.website/x/web"
+)
+
var (
	// llamaServer is the llama.cpp-style HTTP completion endpoint that
	// Predict POSTs requests to.
	llamaServer = flag.String("llama-server", "http://kos-mos:8080/completion", "API server for LLAMA 2")
)
+
+func Predict(opts *LLAMAOpts) (*LLAMAResponse, error) {
+ jsonData, err := json.Marshal(opts)
+ if err != nil {
+ return nil, err
+ }
+ // Make a POST request to the server
+ resp, err := http.Post(*llamaServer, "application/json", bytes.NewBuffer(jsonData))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ // Check the response status code
+ if resp.StatusCode != http.StatusOK {
+ return nil, web.NewError(http.StatusOK, resp)
+ }
+
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var result LLAMAResponse
+
+ if err := json.Unmarshal(data, &result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
// LLAMAOpts is the JSON request body sent to the llama completion
// endpoint. Field names mirror the server's sampling parameters
// (compare cmd/sapientwindex/foo.json).
type LLAMAOpts struct {
	Temperature   float64 `json:"temperature"`
	TopK          int     `json:"top_k"`
	TopP          float64 `json:"top_p"`
	Stream        bool    `json:"stream"` // false: wait for the full completion in one response
	Prompt        string  `json:"prompt"`
	RepeatPenalty float64 `json:"repeat_penalty"`
	RepeatLastN   int     `json:"repeat_last_n"`
	Mirostat      int     `json:"mirostat"`
	NPredict      int     `json:"n_predict"` // maximum number of tokens to generate
}
+
// LLAMAResponse is the JSON body returned by the llama completion
// endpoint. Content holds the generated text; the remaining fields
// echo generation metadata reported by the server.
type LLAMAResponse struct {
	Content            string             `json:"content"` // the model's generated text
	GenerationSettings GenerationSettings `json:"generation_settings"`
	Model              string             `json:"model"`
	Prompt             string             `json:"prompt"`
	Stop               bool               `json:"stop"`
	StoppedEos         bool               `json:"stopped_eos"`
	StoppedLimit       bool               `json:"stopped_limit"`
	StoppedWord        bool               `json:"stopped_word"`
	StoppingWord       string             `json:"stopping_word"`
	Timings            Timings            `json:"timings"`
	TokensCached       int                `json:"tokens_cached"`
	TokensEvaluated    int                `json:"tokens_evaluated"`
	TokensPredicted    int                `json:"tokens_predicted"`
	Truncated          bool               `json:"truncated"`
}
+
// GenerationSettings echoes the sampling configuration the server
// actually used for a completion, as reported in the response body.
type GenerationSettings struct {
	FrequencyPenalty float64 `json:"frequency_penalty"`
	Grammar          string  `json:"grammar"`
	IgnoreEos        bool    `json:"ignore_eos"`
	LogitBias        []any   `json:"logit_bias"`
	Mirostat         int     `json:"mirostat"`
	MirostatEta      float64 `json:"mirostat_eta"`
	MirostatTau      float64 `json:"mirostat_tau"`
	Model            string  `json:"model"`
	NCtx             int     `json:"n_ctx"`
	NKeep            int     `json:"n_keep"`
	NPredict         int     `json:"n_predict"`
	NProbs           int     `json:"n_probs"`
	PenalizeNl       bool    `json:"penalize_nl"`
	PresencePenalty  float64 `json:"presence_penalty"`
	RepeatLastN      int     `json:"repeat_last_n"`
	RepeatPenalty    float64 `json:"repeat_penalty"`
	Seed             int64   `json:"seed"`
	Stop             []any   `json:"stop"`
	Stream           bool    `json:"stream"`
	Temp             float64 `json:"temp"`
	TfsZ             float64 `json:"tfs_z"`
	TopK             int     `json:"top_k"`
	TopP             float64 `json:"top_p"`
	TypicalP         float64 `json:"typical_p"`
}
+
// Timings reports per-request performance numbers from the completion
// server. Judging by the field names, *_ms values are milliseconds and
// *_per_second values are token rates — TODO confirm against the server.
type Timings struct {
	PredictedMs         float64 `json:"predicted_ms"`
	PredictedN          int     `json:"predicted_n"`
	PredictedPerSecond  float64 `json:"predicted_per_second"`
	PredictedPerTokenMs float64 `json:"predicted_per_token_ms"`
	PromptMs            float64 `json:"prompt_ms"`
	PromptN             int     `json:"prompt_n"`
	PromptPerSecond     float64 `json:"prompt_per_second"`
	PromptPerTokenMs    float64 `json:"prompt_per_token_ms"`
}
diff --git a/cmd/sapientwindex/main.go b/cmd/sapientwindex/main.go
new file mode 100644
index 0000000..dc825ba
--- /dev/null
+++ b/cmd/sapientwindex/main.go
@@ -0,0 +1,163 @@
+package main
+
+import (
+ "bytes"
+ "embed"
+ "flag"
+ "fmt"
+ "log"
+ "log/slog"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/Marcel-ICMC/graw"
+ "github.com/Marcel-ICMC/graw/reddit"
+ "within.website/x/internal"
+)
+
var (
	// Reddit script-app credentials used to log the bot in.
	redditUsername  = flag.String("reddit-username", "", "reddit username")
	redditPassword  = flag.String("reddit-password", "", "reddit password")
	redditAppID     = flag.String("reddit-app-id", "", "reddit app id")
	redditAppSecret = flag.String("reddit-app-secret", "", "reddit app secret")
	// subreddit to watch; main additionally always scans "shadowh511".
	subreddit = flag.String("subreddit", "shadowh511", "subreddit to post to")
	// scanDuration is passed to reddit.NewScript as the scan rate.
	scanDuration = flag.Duration("scan-duration", 30*time.Second, "how long to scan for")

	// prompts holds the embedded LLM prompt templates (prompts/*.txt),
	// read by makePrompt.
	//go:embed prompts/*.txt
	prompts embed.FS
)
+
+func main() {
+ internal.HandleStartup()
+
+ slog.Info("starting up", "username", *redditUsername, "subreddit", *subreddit, "scan_duration", (*scanDuration).String())
+
+ cfg := reddit.BotConfig{
+ Agent: "graw:sapientwindex:0.0.1 by /u/shadowh511",
+ App: reddit.App{
+ ID: *redditAppID,
+ Secret: *redditAppSecret,
+ Username: *redditUsername,
+ Password: *redditPassword,
+ },
+ }
+
+ bot, err := reddit.NewBot(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ handle, err := reddit.NewScript(cfg.Agent, *scanDuration)
+ if err != nil {
+ log.Fatal(err)
+ }
+ announce := &announcer{bot: bot}
+
+ scriptCfg := graw.Config{Subreddits: []string{*subreddit, "shadowh511"}}
+
+ stop, wait, err := graw.Scan(announce, handle, scriptCfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ defer stop()
+
+ wait()
+}
+
// announcer is the graw handler for new posts: Post decides whether a
// post is a tulpamancy question and, if so, replies with a generated
// answer.
type announcer struct {
	bot reddit.Bot // logged-in account used to post replies
}
+
+func makePrompt(kind, title, body string) (string, error) {
+ data, err := prompts.ReadFile("prompts/" + kind + ".txt")
+ if err != nil {
+ return "", fmt.Errorf("read prompt: %w", err)
+ }
+
+ tmpl, err := template.New("prompt").Parse(string(data))
+ if err != nil {
+ return "", fmt.Errorf("parse prompts: %w", err)
+ }
+
+ var prompt bytes.Buffer
+ err = tmpl.Execute(&prompt, struct {
+ Title string
+ Body string
+ }{
+ Title: title,
+ Body: body,
+ })
+ if err != nil {
+ return "", fmt.Errorf("execute template: %w", err)
+ }
+
+ return prompt.String(), nil
+}
+
// Post handles a newly scanned Reddit post. The flow is:
//  1. skip posts flaired "Personal";
//  2. run the "moderation" prompt and expect a YES/NO verdict on whether
//     the post is a tulpa/tulpamancy question;
//  3. on YES, run the "helper" prompt and reply with the cleaned answer.
//
// Every failure is logged and swallowed (nil returned) so one bad post
// cannot stop the scan loop.
func (a *announcer) Post(post *reddit.Post) error {
	// Respect the "Personal" flair: never respond to those posts.
	if post.LinkFlairText == "Personal" {
		return nil
	}

	slog.Info("got post", "title", post.Title, "body", post.SelfText)

	prompt, err := makePrompt("moderation", post.Title, post.SelfText)
	if err != nil {
		slog.Error("make prompt", "err", err)
		return nil
	}

	// Sampling settings match cmd/sapientwindex/foo.json.
	opts := &LLAMAOpts{
		Temperature:   0.8,
		TopK:          40,
		TopP:          0.9,
		Stream:        false,
		Prompt:        prompt,
		RepeatPenalty: 1.15,
		RepeatLastN:   512,
		Mirostat:      2,
		NPredict:      2048,
	}

	resp, err := Predict(opts)
	if err != nil {
		slog.Error("predict", "err", err)
		return nil
	}

	// The moderation prompt asks for YES or NO; anything that does not
	// start with YES (case-insensitively, after trimming) is skipped.
	if !strings.HasPrefix(strings.ToUpper(strings.TrimSpace(resp.Content)), "YES") {
		slog.Info("not a question, skipping", "title", post.Title, "body", post.SelfText, "response", resp.Content)
		return nil
	}

	// Second round trip: same sampling options, answer-generation prompt.
	prompt, err = makePrompt("helper", post.Title, post.SelfText)
	if err != nil {
		slog.Error("make prompt", "err", err)
		return nil
	}

	opts.Prompt = prompt

	resp, err = Predict(opts)
	if err != nil {
		slog.Error("predict", "err", err)
		return nil
	}

	// Strip the "ANSWER:" sentinel the helper prompt asks for.
	body := massageAnswer(resp.Content)

	if err := a.bot.Reply(post.Name, body); err != nil {
		slog.Error("reply", "err", err)
		return nil
	}

	return nil
}
+
// massageAnswer cleans a raw model completion for posting: it trims
// surrounding whitespace and strips the "ANSWER:" sentinel the helper
// prompt asks the model to begin with. The original only stripped
// "ANSWER: " with a trailing space, so replies like "ANSWER:foo" or
// "ANSWER:\nfoo" leaked the sentinel into the posted comment; stripping
// the bare sentinel and re-trimming handles all three shapes.
func massageAnswer(answer string) string {
	answer = strings.TrimSpace(answer)
	answer = strings.TrimPrefix(answer, "ANSWER:")
	return strings.TrimSpace(answer)
}
diff --git a/cmd/sapientwindex/prompts/helper.txt b/cmd/sapientwindex/prompts/helper.txt
new file mode 100644
index 0000000..4fe2a47
--- /dev/null
+++ b/cmd/sapientwindex/prompts/helper.txt
@@ -0,0 +1,11 @@
+<s>[INST] <<SYS>>
+You are an expert in creating tulpas, also known as tulpamancy. When you are given questions from users, you will answer questions in one paragraph like a redditor with casual language. ONLY reply in plain text. DO NOT return anything but your response. DO NOT use emoji.
+
+Begin your answer with ANSWER:
+<</SYS>>
+Answer this question:
+
+{{.Title}}
+
+{{.Body}}
+[/INST]
diff --git a/cmd/sapientwindex/prompts/moderation.txt b/cmd/sapientwindex/prompts/moderation.txt
new file mode 100644
index 0000000..e6e9620
--- /dev/null
+++ b/cmd/sapientwindex/prompts/moderation.txt
@@ -0,0 +1,7 @@
+<s>[INST] <<SYS>>
+You are the content moderator for a subreddit. Does this look like a question about tulpas or tulpamancy? Respond YES or NO.
+<</SYS>>
+{{.Title}}
+
+{{.Body}}
+[/INST] \ No newline at end of file