Compare commits

..

5 Commits

Author SHA1 Message Date
Kevin Dang
97acae3d08 added embed msg and stream parser 2024-01-28 12:59:45 -08:00
Kevin Dang
aaf734b06c added ollamajs esm 2024-01-25 18:24:37 -08:00
Kevin Dang
78921ee571 added persistence in chat endpoint 2024-01-23 22:24:26 -08:00
Kevin Dang
f8956b0b50 bot can edit message response 2024-01-23 21:26:39 -08:00
Kevin Dang
70103c1f5a readme ollama setup 2024-01-22 23:24:32 -08:00
9 changed files with 159 additions and 55 deletions

18
.gitignore vendored
View File

@@ -1,10 +1,13 @@
# Credentials
.env
.dev.env
# Created by https://www.toptal.com/developers/gitignore/api/node # Created by https://www.toptal.com/developers/gitignore/api/node
# Edit at https://www.toptal.com/developers/gitignore?templates=node # Edit at https://www.toptal.com/developers/gitignore?templates=node
# dotenv environment variable files
.env
.env.dev.local
.env.test.local
.env.production.local
.env.local
### Node ### ### Node ###
# Logs # Logs
logs logs
@@ -80,13 +83,6 @@ web_modules/
# Yarn Integrity file # Yarn Integrity file
.yarn-integrity .yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/) # parcel-bundler cache (https://parceljs.org/)
.cache .cache
.parcel-cache .parcel-cache

View File

@@ -1,9 +1,23 @@
# Discord Ollama Integration [![License: CC BY-NC 4.0](https://img.shields.io/badge/License-CC_BY--NC_4.0-darkgreen.svg)](https://creativecommons.org/licenses/by-nc/4.0/) [![Release Badge](https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github)](https://github.com/kevinthedang/discord-ollama/releases/latest) # Discord Ollama Integration [![License: CC BY-NC 4.0](https://img.shields.io/badge/License-CC_BY--NC_4.0-darkgreen.svg)](https://creativecommons.org/licenses/by-nc/4.0/) [![Release Badge](https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github)](https://github.com/kevinthedang/discord-ollama/releases/latest)
Ollama is an AI model management tool that allows users to install and use custom large language models locally. The goal is to create a discord bot that will utilize Ollama and chat with it on a Discord! Ollama is an AI model management tool that allows users to install and use custom large language models locally. The goal is to create a discord bot that will utilize Ollama and chat with it on a Discord!
## Ollama Setup
* Go to Ollama's [Linux download page](https://ollama.ai/download/linux) and run the simple curl command they provide. The command should be `curl https://ollama.ai/install.sh | sh`.
* Now run the following commands in separate terminals to test out how it works!
* In terminal 1 -> `ollama serve` to setup ollama
* In terminal 2 -> `ollama run [model name]`, for example `ollama run llama2`
* The models can vary as you can create your own model. You can also view ollama's [library](https://ollama.ai/library) of models.
* This can also be done in [wsl](https://learn.microsoft.com/en-us/windows/wsl/install) for Windows machines.
* You can now interact with the model you just ran (it might take a second to start up).
* Response time varies with processing power!
## To Run ## To Run
* Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo. * Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo.
* You can run the bot by running `npm run start` which will build and run the decompiled typescript. * Run `npm install` to install the npm packages.
* You will need a `.env` file in the root of the project directory with the bot's token.
* For example, `CLIENT_TOKEN = [Bot Token]`
* Now, you can run the bot by running `npm run start`, which will build and run the compiled TypeScript and run the setup for Ollama.
* **IMPORTANT**: This must be run in the WSL/Linux instance to work properly! Using Command Prompt/Powershell/Git Bash/etc. will not work on Windows (at least in my experience).
* Refer to the [resources](#resources) on what node version to use. * Refer to the [resources](#resources) on what node version to use.
## Resources ## Resources
@@ -13,8 +27,8 @@ Ollama is an AI model management tool that allows users to install and use custo
* To run dev with `tsx`, you can use `v20.10.0` or earlier. * To run dev with `tsx`, you can use `v20.10.0` or earlier.
* This project supports any NodeJS version above `16.x.x` to only allow ESModules. * This project supports any NodeJS version above `16.x.x` to only allow ESModules.
* [Ollama](https://ollama.ai/) * [Ollama](https://ollama.ai/)
* [Docker Documentation](https://docs.docker.com/?_gl=1*nof6f8*_ga*MTQxNTc1MTYxOS4xNzAxNzI1ODAx*_ga_XJWPQMJYHQ*MTcwMjQxODUzOS4yLjEuMTcwMjQxOTgyMC41OS4wLjA.)
* [Discord Developer Portal](https://discord.com/developers/docs/intro) * [Discord Developer Portal](https://discord.com/developers/docs/intro)
* [Discord.js Docs](https://discord.js.org/docs/packages/discord.js/main)
## Acknowledgement ## Acknowledgement
* [Kevin Dang](https://github.com/kevinthedang) * [Kevin Dang](https://github.com/kevinthedang)

20
package-lock.json generated
View File

@@ -1,18 +1,19 @@
{ {
"name": "discord-ollama", "name": "discord-ollama",
"version": "0.0.1", "version": "0.1.1",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "discord-ollama", "name": "discord-ollama",
"version": "0.0.1", "version": "0.1.1",
"license": "ISC", "license": "ISC",
"dependencies": { "dependencies": {
"axios": "^1.6.2", "axios": "^1.6.2",
"concurrently": "^8.2.2", "concurrently": "^8.2.2",
"discord.js": "^14.14.1", "discord.js": "^14.14.1",
"dotenv": "^16.3.1" "dotenv": "^16.3.1",
"ollama": "^0.4.3"
}, },
"devDependencies": { "devDependencies": {
"@types/node": "^20.10.5", "@types/node": "^20.10.5",
@@ -1312,6 +1313,14 @@
"node": ">=0.10.0" "node": ">=0.10.0"
} }
}, },
"node_modules/ollama": {
"version": "0.4.3",
"resolved": "https://registry.npmjs.org/ollama/-/ollama-0.4.3.tgz",
"integrity": "sha512-l8RtRp5uKhvqbAmA9n5PS4ZW1RPbA2CPSlNinJ+jVQYD36UjxPQphEPxp0etH/wqY26nHyM6HQEHatIfrW0+Tw==",
"dependencies": {
"whatwg-fetch": "^3.6.20"
}
},
"node_modules/picomatch": { "node_modules/picomatch": {
"version": "2.3.1", "version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
@@ -1598,6 +1607,11 @@
"integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==",
"dev": true "dev": true
}, },
"node_modules/whatwg-fetch": {
"version": "3.6.20",
"resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
"integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg=="
},
"node_modules/wrap-ansi": { "node_modules/wrap-ansi": {
"version": "7.0.0", "version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",

View File

@@ -1,6 +1,6 @@
{ {
"name": "discord-ollama", "name": "discord-ollama",
"version": "0.0.1", "version": "0.1.2",
"description": "Ollama Integration into discord", "description": "Ollama Integration into discord",
"main": "dist/index.js", "main": "dist/index.js",
"exports": "./dist/index.js", "exports": "./dist/index.js",
@@ -19,7 +19,8 @@
"axios": "^1.6.2", "axios": "^1.6.2",
"concurrently": "^8.2.2", "concurrently": "^8.2.2",
"discord.js": "^14.14.1", "discord.js": "^14.14.1",
"dotenv": "^16.3.1" "dotenv": "^16.3.1",
"ollama": "^0.4.3"
}, },
"devDependencies": { "devDependencies": {
"@types/node": "^20.10.5", "@types/node": "^20.10.5",

View File

@@ -14,7 +14,14 @@ const client = new Client({
] ]
}); });
registerEvents(client, Events) const messageHistory = [
{
role: 'assistant',
content: 'My name is Ollama GU.'
}
]
registerEvents(client, Events, messageHistory)
// Try to log in the client // Try to log in the client
client.login(Keys.clientToken) client.login(Keys.clientToken)

View File

@@ -1,42 +1,90 @@
import { event, Events } from '../utils/index.js' import { event, Events } from '../utils/index.js'
import { EmbedBuilder } from 'discord.js'
import ollama from 'ollama'
import Axios from 'axios' import Axios from 'axios'
/* /**
* Max Message length for free users is 2000 characters (bot or not). * Max Message length for free users is 2000 characters (bot or not).
* @param message the message received from the channel
*/ */
export default event(Events.MessageCreate, ({ log }, message) => { export default event(Events.MessageCreate, async ({ log, msgHist }, message) => {
log(`Message created \"${message.content}\" from ${message.author.tag}.`) log(`Message created \"${message.content}\" from ${message.author.tag}.`)
// Hard-coded channel to test output there only, in our case "ollama-endpoint" // Hard-coded channel to test output there only, in our case "ollama-endpoint"
if (message.channelId != '1188262786497785896') { if (message.channelId != '1188262786497785896') return
log(`Unauthorized Channel input, Aborting...`)
return
}
log(`Channel id OK!`)
// Do not respond if bot talks in the chat // Do not respond if bot talks in the chat
if (message.author.tag === message.client.user.tag) { if (message.author.tag === message.client.user.tag) return
log(`Found Bot message reply, Aborting...`)
return // push user response
} msgHist.push({
log(`Sender Checked!`) role: 'user',
content: message.content
})
const botMessage = new EmbedBuilder()
.setTitle(`Response to ${message.author.tag}`)
.setDescription('Generating Response . . .')
.setColor('#00FF00')
const sentMessage = await message.channel.send({ embeds: [botMessage] })
// Request made to API
const request = async () => { const request = async () => {
try { try {
const response = await Axios.post('http://127.0.0.1:11434/api/generate', { // change this when using an actual hosted server or use ollama.js
const response = await ollama.chat({
model: 'llama2', model: 'llama2',
prompt: message.content, messages: msgHist,
stream: false stream: false
}) })
log(response.data)
// const embed = new EmbedBuilder()
message.reply(response.data['response']) .setTitle(`Response to ${message.author.tag}`)
.setDescription(response.message.content)
.setColor('#00FF00')
sentMessage.edit({ embeds: [embed] })
// push bot response
msgHist.push({
role: 'assistant',
content: response.message.content
})
} catch (error) { } catch (error) {
message.edit(error as string)
log(error) log(error)
} }
} }
// Attempt to call ollama's endpoint // Attempt to call ollama's endpoint
request() request()
// Reply with something to prompt that ollama is working, version without embed
message.reply("Generating Response . . .").then(sentMessage => {
// Request made to API
const request = async () => {
try {
// change this when using an actual hosted server or use ollama.js
const response = await Axios.post('http://127.0.0.1:11434/api/chat', {
model: 'llama2',
messages: msgHist,
stream: false
})
sentMessage.edit(response.data.message.content)
// push bot response
// msgHist.push({
// role: 'assistant',
// content: response.data.message.content
// })
} catch (error) {
message.edit(error as string)
log(error)
}
}
// Attempt to call ollama's endpoint
request()
})
}) })

View File

@@ -2,7 +2,7 @@ import { resolve } from "path"
import { config } from "dotenv" import { config } from "dotenv"
// Find config - ONLY WORKS WITH NODEMON // Find config - ONLY WORKS WITH NODEMON
const envFile = process.env.NODE_ENV === "development" ? ".dev.env" : ".env" const envFile = process.env.NODE_ENV === "development" ? ".env.dev.local" : ".env"
// resolve config file // resolve config file
const envFilePath = resolve(process.cwd(), envFile) const envFilePath = resolve(process.cwd(), envFile)

View File

@@ -1,43 +1,44 @@
import type { ClientEvents, Awaitable, Client } from 'discord.js'; import type { ClientEvents, Awaitable, Client } from 'discord.js'
// Export events through here to reduce amount of imports // Export events through here to reduce amount of imports
export { Events } from 'discord.js'; export { Events } from 'discord.js'
export type LogMethod = (...args: unknown[]) => void; export type LogMethod = (...args: unknown[]) => void
export type EventKeys = keyof ClientEvents; // only wants keys of ClientEvents object export type EventKeys = keyof ClientEvents // only wants keys of ClientEvents object
// Event properties // Event properties
export interface EventProps { export interface EventProps {
client: Client; client: Client
log: LogMethod; log: LogMethod
msgHist: { role: string, content: string }[]
} }
export type EventCallback<T extends EventKeys> = ( export type EventCallback<T extends EventKeys> = (
props: EventProps, props: EventProps,
...args: ClientEvents[T] ...args: ClientEvents[T]
) => Awaitable<unknown>; // Method can be synchronous or async, unknown so we can return anything ) => Awaitable<unknown> // Method can be synchronous or async, unknown so we can return anything
// Event interface // Event interface
export interface Event<T extends EventKeys = EventKeys> { export interface Event<T extends EventKeys = EventKeys> {
key: T; key: T
callback: EventCallback<T>; callback: EventCallback<T>
} }
export function event<T extends EventKeys>(key: T, callback: EventCallback<T>): Event<T> { export function event<T extends EventKeys>(key: T, callback: EventCallback<T>): Event<T> {
return { key, callback }; return { key, callback }
} }
export function registerEvents(client: Client, events: Event[]): void { export function registerEvents(client: Client, events: Event[], msgHist: { role: string, content: string }[]): void {
for (const { key, callback } of events) { for (const { key, callback } of events) {
client.on(key, (...args) => { client.on(key, (...args) => {
// Create a new log method for this event // Create a new log method for this event
const log = console.log.bind(console, `[Event: ${key}]`); const log = console.log.bind(console, `[Event: ${key}]`)
// Handle Errors, call callback, log errors as needed // Handle Errors, call callback, log errors as needed
try { try {
callback({ client, log }, ...args); callback({ client, log, msgHist }, ...args)
} catch (error) { } catch (error) {
log('[Uncaught Error]', error); log('[Uncaught Error]', error)
} }
}); })
} }
} }

23
src/utils/streamParse.ts Normal file
View File

@@ -0,0 +1,23 @@
import { AxiosResponse } from "axios";
/** One newline-delimited JSON chunk emitted by Ollama's /api/chat stream. */
export interface StreamChunk {
    model: string,
    created_at: string,
    message: {
        role: string,
        content: string
    },
    done: boolean
}

/**
 * When running a /api/chat stream, the output needs to be parsed into an array of objects.
 * Ollama streams newline-delimited JSON: one chunk object per line.
 * Only the `data` field of the response is read, so any response-like object works.
 * @param stream response from Ollama whose `data` holds the raw NDJSON body
 * @returns the parsed chunks in stream order; an empty array for an empty body
 * @throws SyntaxError if a non-empty line is not valid JSON
 */
export function parseStream(stream: { data: string }): StreamChunk[] {
    // split string by newline; drop blank lines so an empty body
    // does not become [''] and crash JSON.parse
    const lines: string[] = stream.data
        .trim()
        .split('\n')
        .filter((line: string) => line.length > 0)

    // parse each line and load it into a typed chunk object
    return lines.map((line: string): StreamChunk => JSON.parse(line))
}