Compare commits


2 Commits

Author | SHA1 | Message | Date
Kevin Dang | 97acae3d08 | added embed msg and stream parser | 2024-01-28 12:59:45 -08:00
Kevin Dang | aaf734b06c | added ollamajs esm | 2024-01-25 18:24:37 -08:00
6 changed files with 98 additions and 24 deletions

.gitignore vendored (18 changed lines)

@@ -1,10 +1,13 @@
# Credentials
.env
.dev.env
# Created by https://www.toptal.com/developers/gitignore/api/node
# Edit at https://www.toptal.com/developers/gitignore?templates=node
# dotenv environment variable files
.env
.env.dev.local
.env.test.local
.env.production.local
.env.local
### Node ###
# Logs
logs
@@ -80,13 +83,6 @@ web_modules/
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

package-lock.json generated (16 changed lines)

@@ -12,7 +12,8 @@
"axios": "^1.6.2",
"concurrently": "^8.2.2",
"discord.js": "^14.14.1",
"dotenv": "^16.3.1"
"dotenv": "^16.3.1",
"ollama": "^0.4.3"
},
"devDependencies": {
"@types/node": "^20.10.5",
@@ -1312,6 +1313,14 @@
"node": ">=0.10.0"
}
},
"node_modules/ollama": {
"version": "0.4.3",
"resolved": "https://registry.npmjs.org/ollama/-/ollama-0.4.3.tgz",
"integrity": "sha512-l8RtRp5uKhvqbAmA9n5PS4ZW1RPbA2CPSlNinJ+jVQYD36UjxPQphEPxp0etH/wqY26nHyM6HQEHatIfrW0+Tw==",
"dependencies": {
"whatwg-fetch": "^3.6.20"
}
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
@@ -1598,6 +1607,11 @@
"integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==",
"dev": true
},
"node_modules/whatwg-fetch": {
"version": "3.6.20",
"resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
"integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg=="
},
"node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",

package.json

@@ -1,6 +1,6 @@
{
"name": "discord-ollama",
"version": "0.1.1",
"version": "0.1.2",
"description": "Ollama Integration into discord",
"main": "dist/index.js",
"exports": "./dist/index.js",
@@ -19,7 +19,8 @@
"axios": "^1.6.2",
"concurrently": "^8.2.2",
"discord.js": "^14.14.1",
"dotenv": "^16.3.1"
"dotenv": "^16.3.1",
"ollama": "^0.4.3"
},
"devDependencies": {
"@types/node": "^20.10.5",

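With ollama added as a runtime dependency, the event handler later in this diff imports the client directly. For reference, a minimal sketch of the call shape that handler relies on (the model name and prompt are illustrative, and a locally running Ollama server is assumed):

import ollama from 'ollama'

// one-shot, non-streaming chat completion against a local Ollama instance
const response = await ollama.chat({
    model: 'llama2',
    messages: [{ role: 'user', content: 'Say hello!' }],
    stream: false
})
console.log(response.message.content)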

@@ -1,10 +1,13 @@
import { event, Events } from '../utils/index.js'
import { EmbedBuilder } from 'discord.js'
import ollama from 'ollama'
import Axios from 'axios'
/*
/**
* Max Message length for free users is 2000 characters (bot or not).
* @param message the message received from the channel
*/
export default event(Events.MessageCreate, ({ log, msgHist }, message) => {
export default event(Events.MessageCreate, async ({ log, msgHist }, message) => {
log(`Message created \"${message.content}\" from ${message.author.tag}.`)
// Hard-coded channel to test output there only, in our case "ollama-endpoint"
@@ -19,7 +22,44 @@ export default event(Events.MessageCreate, ({ log, msgHist }, message) => {
content: message.content
})
// Reply with something to prompt that ollama is working
const botMessage = new EmbedBuilder()
.setTitle(`Response to ${message.author.tag}`)
.setDescription('Generating Response . . .')
.setColor('#00FF00')
const sentMessage = await message.channel.send({ embeds: [botMessage] })
const request = async () => {
try {
// change this when using an actual hosted server or use ollama.js
const response = await ollama.chat({
model: 'llama2',
messages: msgHist,
stream: false
})
const embed = new EmbedBuilder()
.setTitle(`Response to ${message.author.tag}`)
.setDescription(response.message.content)
.setColor('#00FF00')
sentMessage.edit({ embeds: [embed] })
// push bot response
msgHist.push({
role: 'assistant',
content: response.message.content
})
} catch (error) {
message.edit(error as string)
log(error)
}
}
// Attempt to call ollama's endpoint
request()
// Reply with something to prompt that ollama is working, version without embed
message.reply("Generating Response . . .").then(sentMessage => {
// Request made to API
const request = async () => {
@@ -30,19 +70,19 @@ export default event(Events.MessageCreate, ({ log, msgHist }, message) => {
messages: msgHist,
stream: false
})
log(response.data)
sentMessage.edit(response.data.message.content)
// push bot response
msgHist.push({
role: 'assistant',
content: response.data.message.content
})
// msgHist.push({
// role: 'assistant',
// content: response.data.message.content
// })
} catch (error) {
message.edit(error as string)
log(error)
}
}
}
// Attempt to call ollama's endpoint
request()
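The handler above still requests the whole completion in one shot (stream: false), even though the same pair of commits introduces a stream parser. A rough sketch of how the request() helper could instead stream the reply and update the placeholder embed as it arrives, reusing botMessage, sentMessage, and msgHist from the handler above, and assuming the ollama client's stream: true mode yields partial chat responses as an async iterable (the flush interval is arbitrary):

const streamed = await ollama.chat({ model: 'llama2', messages: msgHist, stream: true })

let reply = ''
let parts = 0
for await (const part of streamed) {
    reply += part.message.content
    // editing the message on every token would hit Discord's rate limits, so flush periodically
    if (++parts % 10 === 0 && reply.length > 0)
        await sentMessage.edit({ embeds: [botMessage.setDescription(reply)] })
}
await sentMessage.edit({ embeds: [botMessage.setDescription(reply || 'No response generated.')] })
msgHist.push({ role: 'assistant', content: reply })

Embed descriptions are capped at 4096 characters, so a longer reply would still need to be split across messages.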


@@ -2,7 +2,7 @@ import { resolve } from "path"
import { config } from "dotenv"
// Find config - ONLY WORKS WITH NODEMON
const envFile = process.env.NODE_ENV === "development" ? ".dev.env" : ".env"
const envFile = process.env.NODE_ENV === "development" ? ".env.dev.local" : ".env"
// resolve config file
const envFilePath = resolve(process.cwd(), envFile)
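The hunk ends before dotenv is actually invoked; presumably the resolved path is then handed to config(), roughly as below (the config() call itself is assumed from the imports, it is not shown in this diff):

import { resolve } from "path"
import { config } from "dotenv"

// the dev workflow (nodemon, per the comment above) is expected to set NODE_ENV=development,
// which now selects the git-ignored .env.dev.local file added to .gitignore earlier in this diff
const envFile = process.env.NODE_ENV === "development" ? ".env.dev.local" : ".env"
const envFilePath = resolve(process.cwd(), envFile)
config({ path: envFilePath })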

src/utils/streamParse.ts (new file, 23 lines)

@@ -0,0 +1,23 @@
import { AxiosResponse } from "axios";
/**
* When running a /api/chat stream, the output needs to be parsed into an array of objects
* @param stream Axios response from Ollama
*/
export function parseStream(stream: AxiosResponse<any, any>) {
// split string by newline
const keywordObjects: string[] = stream.data.trim().split('\n')
// parse string and load them into objects
const keywordsArray: {
model: string,
created_at: string,
message: {
role: string,
content: string
},
done: boolean
}[] = keywordObjects.map((keywordString) => JSON.parse(keywordString))
return keywordsArray
}
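A usage sketch for the new helper, assuming the caller requests /api/chat with Axios against Ollama's default local endpoint and keeps the body as raw text so the newline-delimited JSON reaches parseStream intact (the URL, model, and prompt are illustrative):

import Axios from 'axios'
import { parseStream } from './streamParse.js'

const stream = await Axios.post('http://localhost:11434/api/chat', {
    model: 'llama2',
    messages: [{ role: 'user', content: 'Why is the sky blue?' }],
    stream: true
}, { responseType: 'text' })

// each parsed chunk holds a fragment of the reply in message.content; the final chunk has done: true
const chunks = parseStream(stream)
const reply = chunks.map(chunk => chunk.message.content).join('')
console.log(reply)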