diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 5f4a155..c023cd4 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -23,10 +23,7 @@
## Environment
* You will need two environment files:
* `.env`: for running the bot
- * `CLIENT_TOKEN`: the token for the bot to log in
- * `CHANNEL_ID`: the id of the channel you wish for the bot to listen in
- * `MODEL`: the mode you wish to use
- * `BOT_UID`: the user id the bot goes by (the id of the discord user)
+ * Please refer to `.env.sample` for all environment variables to include
* `.env.dev.local`: also runs the bot, but with development variables
* Currently there are no differences between the two; add environment variables to either file as needed.
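
> Note: for quick reference, here is a minimal sketch of how these variables might be surfaced to the code through `src/keys.ts` (variable names are taken from this PR's workflow and compose files; the exact shape of the real module may differ):

```ts
// Hypothetical env loader in the style of src/keys.ts; the variable names
// come from this PR (.env / workflow secrets), the real module may differ.
function getEnvVar(name: string): string {
    const value = process.env[name]
    if (value === undefined)
        throw new Error(`Missing required environment variable: ${name}`)
    return value
}

const Keys = {
    clientToken: getEnvVar('CLIENT_TOKEN'),
    guildId: getEnvVar('GUILD_ID'),
    clientUid: getEnvVar('CLIENT_UID'),
    ollamaIp: getEnvVar('OLLAMA_IP'),
    ollamaPort: getEnvVar('OLLAMA_PORT'),
    superUser: getEnvVar('ADMINS')
} as const

export default Keys
```
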
diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml
index bef512d..03ae91e 100644
--- a/.github/workflows/build-test.yml
+++ b/.github/workflows/build-test.yml
@@ -37,9 +37,12 @@ jobs:
echo CLIENT_UID = ${{ secrets.CLIENT_UID }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
-
+ echo ADMINS = ${{ secrets.ADMINS }} >> .env
+
+ # set -e ensures that if nohup fails, this step fails
- name: Startup Discord Bot Client
run: |
+ set -e
nohup npm run prod &
Discord-Ollama-Container-Build: # test docker build and run
diff --git a/README.md b/README.md
index 82d9402..dadb5a7 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,26 @@
-# Discord Ollama Integration [](https://creativecommons.org/licenses/by-nc/4.0/) [](https://github.com/kevinthedang/discord-ollama/releases/latest)
-Ollama is an AI model management tool that allows users to install and use custom large language models locally. The goal is to create a discord bot that will utilize Ollama and chat with it on a Discord server! Also, allow others to create their own models personalized for their own servers!
+<!-- header images: discord-icon, grey-plus, and ollama-icon (added under imgs/ in this PR) -->
+
+# Discord Ollama Integration
+
+### Ollama as your Discord AI Assistant
+
+<!-- license and latest-release badges -->
+
+## About/Goals
+Ollama is an AI model management tool that allows users to install and use custom large language models locally.
+The project aims to:
+* [x] Create a Discord bot that will utilize Ollama to chat with users!
+ * [ ] User Preferences on Chat
+ * [ ] Message Persistence on Channels and Threads
+ * [x] Containerization with Docker
+ * [x] Slash Commands Compatible
+ * [ ] Generated Token Length Handling for >2000 or >6000 characters
+ * [ ] External WebUI Integration
+ * [ ] Administrator Role Compatible
+* [ ] Allow others to create their own models personalized for their own servers!
+ * [ ] Documentation on creating your own LLM
+ * [ ] Documentation on web scraping and cleaning
## Environment Setup
* Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo.
diff --git a/docker-compose.yml b/docker-compose.yml
index e298b5b..2b0f93b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -8,7 +8,7 @@ services:
build: ./ # find docker file in designated path
container_name: discord
restart: always # rebuild container always
- image: discord/bot:0.3.4
+ image: discord/bot:0.3.5
environment:
CLIENT_TOKEN: ${CLIENT_TOKEN}
GUILD_ID: ${GUILD_ID}
diff --git a/imgs/discord-icon.png b/imgs/discord-icon.png
new file mode 100644
index 0000000..c5d49da
Binary files /dev/null and b/imgs/discord-icon.png differ
diff --git a/imgs/grey-plus.png b/imgs/grey-plus.png
new file mode 100644
index 0000000..584610a
Binary files /dev/null and b/imgs/grey-plus.png differ
diff --git a/imgs/ollama-icon.png b/imgs/ollama-icon.png
new file mode 100644
index 0000000..0bc1220
Binary files /dev/null and b/imgs/ollama-icon.png differ
diff --git a/package-lock.json b/package-lock.json
index f270b05..d82aa88 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "discord-ollama",
- "version": "0.3.4",
+ "version": "0.3.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "discord-ollama",
- "version": "0.3.4",
+ "version": "0.3.5",
"license": "ISC",
"dependencies": {
"axios": "^1.6.2",
diff --git a/package.json b/package.json
index fcb351e..f620d57 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "discord-ollama",
- "version": "0.3.4",
+ "version": "0.3.5",
"description": "Ollama Integration into discord",
"main": "build/index.js",
"exports": "./build/index.js",
diff --git a/src/client.ts b/src/client.ts
index c4587e8..f0d4dc4 100644
--- a/src/client.ts
+++ b/src/client.ts
@@ -2,6 +2,7 @@ import { Client, GatewayIntentBits } from 'discord.js'
import { UserMessage, registerEvents } from './utils/events.js'
import Events from './events/index.js'
import { Ollama } from 'ollama'
+import { Queue } from './queues/queue.js'
// Import keys/tokens
import Keys from './keys.js'
@@ -23,12 +24,7 @@ const ollama = new Ollama({
})
// Create Queue managed by Events
-const messageHistory: [UserMessage] = [
- {
- role: 'system',
- content: 'Your name is Ollama GU'
- }
-]
+const messageHistory: Queue<UserMessage> = new Queue<UserMessage>()
/**
* register events for bot to listen to in discord
@@ -44,4 +40,10 @@ await client.login(Keys.clientToken)
.catch((error) => {
console.error('[Login Error]', error)
process.exit(1)
+})
+
+// queue up the bot's name
+messageHistory.enqueue({
+ role: 'assistant',
+ content: `My name is ${client.user?.username}`
})
\ No newline at end of file
diff --git a/src/commands/shutoff.ts b/src/commands/shutoff.ts
index 8e9a573..e954a78 100644
--- a/src/commands/shutoff.ts
+++ b/src/commands/shutoff.ts
@@ -29,12 +29,18 @@ export const Shutoff: SlashCommand = {
const superUsers: string[] = JSON.parse(Keys.superUser.replace(/'/g, '"'))
// check if admin or false on shutdown
- if (interaction.user.tag in superUsers || !(!interaction.options.get('are-you-sure')?.value && interaction.user.tag in superUsers)) {
+ if (!superUsers.includes(interaction.user.tag)) {
interaction.reply({
- content: `Shutdown failed:\n\n${interaction.user.tag}, You do not have permission to shutoff **${client.user?.tag}**, otherwise, you just didn't want to.`,
+ content: `Shutdown failed:\n\n${interaction.user.tag}, You do not have permission to shutoff **${client.user?.tag}**.`,
ephemeral: true
})
return // stop from shutting down
+ } else if (!interaction.options.get('are-you-sure')?.value) {
+ interaction.reply({
+ content: `Shutdown failed:\n\n${interaction.user.tag}, You didn't want to shutoff **${client.user?.tag}**.`,
+ ephemeral: true
+ })
+ return
}
interaction.reply({
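
> Note on the `superUsers` parsing above: `Keys.superUser` holds the `ADMINS` value as a single-quoted, JSON-like array, so the single quotes are normalized before `JSON.parse`. A minimal sketch (the example value is hypothetical):

```ts
// ADMINS is stored in .env as e.g. ADMINS = ['user1', 'user2'];
// swapping single quotes for double quotes makes it valid JSON.
const raw = "['user1', 'user2']" // hypothetical value of Keys.superUser
const superUsers: string[] = JSON.parse(raw.replace(/'/g, '"'))
console.log(superUsers.includes('user1')) // -> true
```
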
diff --git a/src/events/messageCreate.ts b/src/events/messageCreate.ts
index 9bac64f..aeb0c8e 100644
--- a/src/events/messageCreate.ts
+++ b/src/events/messageCreate.ts
@@ -18,8 +18,11 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
// Only respond if message mentions the bot
if (!message.mentions.has(tokens.clientUid)) return
+ // if the queue is full, remove the oldest message before pushing
+ if (msgHist.size() === msgHist.getCapacity()) msgHist.dequeue()
+
// push user response
- msgHist.push({
+ msgHist.enqueue({
role: 'user',
content: message.content
})
@@ -43,8 +46,8 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
})
})
- let response: ChatResponse
-
+ let response: ChatResponse
+
// undefined or false, use normal, otherwise use embed
if (config.options['message-style'])
response = await embedMessage(message, ollama, tokens, msgHist)
@@ -54,14 +57,17 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
// If something bad happened, remove user query and stop
if (response == undefined) { msgHist.pop(); return }
+ // if queue is full, remove the oldest message
+ if (msgHist.size() === msgHist.getCapacity()) msgHist.dequeue()
+
// successful query, save it as history
- msgHist.push({
+ msgHist.enqueue({
role: 'assistant',
content: response.message.content
})
} catch (error: any) {
msgHist.pop() // remove message because of failure
openFile('config.json', 'message-style', true)
- message.reply(`**Response generation failed.**\n\n**Reason:** *${error.message}*`)
+ message.reply(`**Error Occurred:**\n\n**Reason:** *${error.message}*`)
}
})
\ No newline at end of file
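
> Note: the dequeue-before-enqueue pattern now appears twice in this handler (once for the user message, once for the assistant reply). A hypothetical helper that captures it, assuming the `Queue` class added below in `src/queues/queue.ts`:

```ts
import { Queue } from './queues/queue.js' // path assumes the src/ layout in this PR

// Evict the oldest entry when the queue is at capacity, then enqueue.
// Without the eviction step, Queue.enqueue() throws once capacity is hit.
function enqueueWithEviction<T>(queue: Queue<T>, item: T): void {
    if (queue.size() === queue.getCapacity()) queue.dequeue()
    queue.enqueue(item)
}
```
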
diff --git a/src/queues/queue.ts b/src/queues/queue.ts
new file mode 100644
index 0000000..c634b98
--- /dev/null
+++ b/src/queues/queue.ts
@@ -0,0 +1,70 @@
+// Queue interface for any queue class to follow
+interface IQueue<T> {
+ enqueue(item: T): void
+ dequeue(): T | undefined
+ size(): number
+}
+
+/**
+ * Queue for UserMessages
+ * When the limit for messages is met, we want to clear
+ * out the oldest message in the queue
+ */
+export class Queue<T> implements IQueue<T> {
+ private storage: T[] = []
+
+ /**
+ * Set up Queue
+ * @param capacity max length of queue
+ */
+ constructor(private capacity: number = 5) {}
+
+ /**
+ * Add an item to the back of the queue
+ * @param item object of type T to add into queue
+ */
+ enqueue(item: T): void {
+ if (this.size() === this.capacity)
+ throw Error('Queue has reached max capacity, you cannot add more items.')
+ this.storage.push(item)
+ }
+
+ /**
+ * Remove the item at the front of the queue
+ * @returns the oldest item of type T, or undefined if the queue is empty
+ */
+ dequeue(): T | undefined {
+ return this.storage.shift()
+ }
+
+ /**
+ * Size of the queue
+ * @returns length of the queue as a number
+ */
+ size(): number {
+ return this.storage.length
+ }
+
+ /**
+ * Remove the newest item from the back of the queue, typically after a failed query
+ */
+ pop(): void {
+ this.storage.pop()
+ }
+
+ /**
+ * Get the queue as an array
+ * @returns an array of T items
+ */
+ getItems(): T[] {
+ return this.storage
+ }
+
+ /**
+ * Get capacity of the queue
+ * @returns capacity of queue
+ */
+ getCapacity(): number {
+ return this.capacity
+ }
+}
\ No newline at end of file
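
> A minimal usage sketch of the new `Queue` (the capacity of 2 is chosen for illustration; the default is 5):

```ts
import { Queue } from './queues/queue.js' // path assumes the src/ layout in this PR

const history = new Queue<{ role: string, content: string }>(2)
history.enqueue({ role: 'user', content: 'Hello!' })
history.enqueue({ role: 'assistant', content: 'Hi there.' })

// The queue is now full: enqueue() would throw, so callers are expected
// to dequeue() the oldest message first, as messageCreate.ts does.
if (history.size() === history.getCapacity()) history.dequeue()
history.enqueue({ role: 'user', content: 'How are you?' })

console.log(history.getItems().map(m => m.content))
// -> [ 'Hi there.', 'How are you?' ]
```
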
diff --git a/src/utils/events.ts b/src/utils/events.ts
index 50228de..13114e5 100644
--- a/src/utils/events.ts
+++ b/src/utils/events.ts
@@ -1,5 +1,6 @@
-import type { ClientEvents, Awaitable, Client } from 'discord.js'
+import type { ClientEvents, Awaitable, Client, User } from 'discord.js'
import { Ollama } from 'ollama'
+import { Queue } from '../queues/queue.js'
// Export events through here to reduce amount of imports
export { Events } from 'discord.js'
@@ -33,7 +34,7 @@ export type UserMessage = {
export interface EventProps {
client: Client
log: LogMethod
- msgHist: { role: string, content: string }[]
+ msgHist: Queue<UserMessage>
tokens: Tokens,
ollama: Ollama
}
@@ -63,7 +64,7 @@ export function event<T extends keyof ClientEvents>(key: T, callback: EventCallback<T>):
export function registerEvents(
client: Client,
events: Event[],
- msgHist: UserMessage[],
+ msgHist: Queue<UserMessage>,
tokens: Tokens,
ollama: Ollama
): void {
diff --git a/src/utils/messageEmbed.ts b/src/utils/messageEmbed.ts
index fa3616f..4ed9f19 100644
--- a/src/utils/messageEmbed.ts
+++ b/src/utils/messageEmbed.ts
@@ -1,6 +1,7 @@
import { EmbedBuilder, Message } from 'discord.js'
import { ChatResponse, Ollama } from 'ollama'
import { UserMessage } from './events.js'
+import { Queue } from '../queues/queue.js'
/**
* Method to send replies as normal text on discord like any other user
@@ -15,7 +16,7 @@ export async function embedMessage(
channel: string,
model: string
},
- msgHist: UserMessage[]
+ msgHist: Queue<UserMessage>
) {
// bot response
let response: ChatResponse
@@ -33,7 +34,7 @@ export async function embedMessage(
// Attempt to query model for message
response = await ollama.chat({
model: tokens.model,
- messages: msgHist,
+ messages: msgHist.getItems(),
options: {
num_thread: 8, // remove if optimization needed further
mirostat: 1,
diff --git a/src/utils/messageNormal.ts b/src/utils/messageNormal.ts
index bcd94f0..0a34e25 100644
--- a/src/utils/messageNormal.ts
+++ b/src/utils/messageNormal.ts
@@ -1,6 +1,7 @@
import { Message } from 'discord.js'
import { ChatResponse, Ollama } from 'ollama'
import { UserMessage } from './events.js'
+import { Queue } from '../queues/queue.js'
/**
* Method to send replies as normal text on discord like any other user
@@ -15,7 +16,7 @@ export async function normalMessage(
channel: string,
model: string
},
- msgHist: UserMessage[]
+ msgHist: Queue<UserMessage>
) {
// bot's response
let response: ChatResponse
@@ -25,7 +26,7 @@ export async function normalMessage(
// Attempt to query model for message
response = await ollama.chat({
model: tokens.model,
- messages: msgHist,
+ messages: msgHist.getItems(),
options: {
num_thread: 8, // remove if optimization needed further
mirostat: 1,