Some improvements

2025-01-17 02:36:18 +01:00
parent dd3333ba9a
commit 5c94a354d8
5 changed files with 30 additions and 6 deletions
go.mod +1
@@ -10,6 +10,7 @@ require (
 require (
 	github.com/dlclark/regexp2 v1.11.4 // indirect
 	github.com/google/uuid v1.6.0 // indirect
+	github.com/joho/godotenv v1.5.1 // indirect
 	github.com/labstack/gommon v0.4.2 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
go.sum +2
@@ -6,6 +6,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
+github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/labstack/echo/v4 v4.13.2 h1:9aAt4hstpH54qIcqkuUXRLTf+v7yOTfMPWzDtuqLmtA=
 github.com/labstack/echo/v4 v4.13.2/go.mod h1:uc9gDtHB8UWt3FfbYx0HyxcCuvR4YuPYOxF/1QjoV/c=
 github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
main.go +24 -4
@@ -5,8 +5,10 @@ import (
 	"fmt"
 	"log"
 	"net/http"
+	"os"
 	"strconv"
 
+	"github.com/joho/godotenv"
 	"github.com/labstack/echo/v4"
 	"github.com/labstack/echo/v4/middleware"
 	"github.com/tmc/langchaingo/llms"
@@ -14,6 +16,23 @@ import (
 )
 
 func main() {
+	err := godotenv.Load()
+	if err != nil {
+		log.Fatal("Error loading environment!")
+	}
+
+	LLM_MODEL := os.Getenv("LLM_MODEL")
+	PORT := os.Getenv("PORT")
+
+	if len(LLM_MODEL) == 0 {
+		log.Fatal("No LLM model specified in environment!")
+	}
+
+	if len(PORT) == 0 {
+		PORT = "8080"
+	}
+
 	ech := echo.New()
 	ech.Use(middleware.CORS())
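
Note: godotenv.Load() with no arguments reads a .env file from the process working directory and does not override variables that are already set in the environment. A minimal sketch of the file this change expects (placeholder values, not part of this commit):

# .env (example)
LLM_MODEL=llama3.1:8b
PORT=8080

Since Load() returns an error when the file is missing, the log.Fatal above makes the .env file mandatory even though PORT has an in-code default of 8080.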
@@ -33,17 +52,18 @@ func main() {
 		ctx.Response().Header().Set(echo.HeaderContentType, echo.MIMEApplicationJSON)
 		ctx.Response().WriteHeader(http.StatusOK)
 
-		llm, err := ollama.New(ollama.WithModel("llama3.1:8b"))
+		llm, err := ollama.New(ollama.WithModel(fmt.Sprintf("%s", LLM_MODEL)))
 		if err != nil {
 			log.Fatal(err)
 		}
 
+		// TODO: Clean the prompt result of any unnecessary formatting or text
 		ollamaCtx := context.Background()
 		content := []llms.MessageContent{
-			llms.TextParts(llms.ChatMessageTypeSystem, `You are only a code generator. You must not respond with anything else but code and do not format with code fences. Always use spaces instead of tabs.`),
+			llms.TextParts(llms.ChatMessageTypeSystem, `You must only generate code without any descriptions. Also don't include any code comments and use spaces instead of tabs for spacing. Most importantly, you must remove any markdown code fences that wrap the content!`),
 			llms.TextParts(llms.ChatMessageTypeHuman, fmt.Sprintf(`
-Generate max %d lines of code without any unncessary formatting from a well known open source project in the %s programming language.`, lines, lang)),
+Generate a maximum of %d lines of code from a well known open source project in the %s programming language.`, lines, lang)),
 		}
 
 		if _, err := llm.GenerateContent(ollamaCtx, content, llms.WithStreamingFunc(func(streamCtx context.Context, chunk []byte) error {
@@ -58,5 +78,5 @@ func main() {
 		return nil
 	})
 
-	ech.Logger.Fatal(ech.Start(":5000"))
+	ech.Logger.Fatal(ech.Start(fmt.Sprintf(":%s", PORT)))
 }
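
Note: the TODO above concerns stripping the markdown code fences that models often emit despite the system prompt. A minimal, hypothetical sketch of such a cleanup helper (the name stripCodeFences is an assumption, not part of this commit; it needs "strings" in the import block):

// stripCodeFences removes a leading fence line (``` or ```lang) and a
// trailing ``` line if the model wrapped its output in a markdown block.
// Hypothetical helper for the TODO above; not part of this commit.
func stripCodeFences(s string) string {
	s = strings.TrimSpace(s)
	if !strings.HasPrefix(s, "```") {
		return s
	}
	lines := strings.Split(s, "\n")
	lines = lines[1:] // drop the opening fence, which may carry a language tag
	if n := len(lines); n > 0 && strings.TrimSpace(lines[n-1]) == "```" {
		lines = lines[:n-1] // drop the closing fence
	}
	return strings.Join(lines, "\n")
}

Because the handler streams chunks to the client, this would have to run over the buffered complete response rather than per chunk, since a fence can be split across chunk boundaries.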