Compare commits


6 Commits

Author SHA1 Message Date
Kevin Dang
727731695e Chat Queue Persistence (#33)
* fix: workflow env

* update: center title on readme

* update: readme goals and format

* add: icons in readme

* fix: plus margin

* update: environment variables in contr.

* add: queue for chat history

* add: set -e for workflow failure

* update: version increment

* fix: client null info

* fix: shutoff issues
2024-04-02 22:04:09 -07:00
Kevin Dang
5f8b513269 Workflows Fix (#32)
* fix: workflows missing new env
2024-04-01 00:51:07 -07:00
Kevin Dang
fcb0267559 Shutoff Bot Command (#30)
* add: disable chat command

* update: workflow name

* add: shutoff using admin env list

* update: sample env for admins

* fix: shutdown booleans

* update: version increment
2024-04-01 00:43:19 -07:00
Kevin Dang
6b903cff5e Auto-Generate Config (#29)
* fix: name in job

* add: auto create config.json on missing

* update: readme goals

* add: clarify instructions on fail chat

* update: reduced redundancy in package file
2024-03-30 22:02:49 -07:00
Kevin Dang
9320a7476e CI for Application Builds (#27) 2024-03-29 23:33:03 -07:00
Kevin Dang
1b70fc2787 Nvidia Container Toolkit Setup and Docs (#26)
* update: nvidia as runtime

* add: setup instructions for nvidia

* update: setup-docker.md
2024-03-28 12:01:33 -07:00
22 changed files with 368 additions and 41 deletions

.env.sample

@@ -21,4 +21,7 @@ OLLAMA_PORT = PORT
DISCORD_IP = IP_ADDRESS
# subnet address, ex. 172.18.0.0 as we use /16.
SUBNET_ADDRESS = ADDRESS
# list of admins to handle admin commands for the bot, use single quotes
ADMINS=['username1', 'username2', 'username3', ...]
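
The single-quote requirement exists because the bot later parses this value with `JSON.parse` after swapping the quotes (see `src/commands/shutoff.ts` later in this diff). A minimal sketch of that round trip:

```ts
// Sketch of how the ADMINS value becomes a string array
// (mirrors the JSON.parse call in shutoff.ts below).
const raw = "['username1', 'username2']"                    // value as written in .env
const admins: string[] = JSON.parse(raw.replace(/'/g, '"')) // swap to valid JSON quotes
console.log(admins)                                         // ["username1", "username2"]
```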


@@ -23,10 +23,7 @@
## Environment
* You will need two environment files:
* `.env`: for running the bot
* `CLIENT_TOKEN`: the token for the bot to log in
* `CHANNEL_ID`: the id of the channel you wish for the bot to listen in
* `MODEL`: the model you wish to use
* `BOT_UID`: the user id the bot goes by (the id of the discord user)
* Please refer to `.env.sample` for all environment variables to include
* `.env.dev.local`: also runs the bot, but with development variables
* Currently there are no differences between the two; add development-specific variables as needed (a sketch of how the bot reads these variables follows below).
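
How these variables reach the bot is not shown in this hunk; `src/keys.ts` (later in this diff) calls a `getEnvVar` helper whose body is not part of the diff. A hedged sketch of what such a reader might look like:

```ts
// Hypothetical body for the getEnvVar helper referenced in src/keys.ts;
// the real implementation is not included in this diff.
function getEnvVar(name: string): string {
    const value = process.env[name]
    if (value === undefined)
        throw new Error(`Missing environment variable: ${name}`)
    return value
}
```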

.github/workflows/build-test.yml (new vendored file, 84 additions)

@@ -0,0 +1,84 @@
name: Builds
run-name: Validate Node and Docker Builds
on:
  push:
    branches:
      - master
jobs:
  Discord-Node-Build: # test that the node app installs and runs
    runs-on: ubuntu-latest
    timeout-minutes: 2
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
      - name: Set up Node Environment v18.18.2
        uses: actions/setup-node@v4
        with:
          node-version: 18.18.2
          cache: 'npm'
      - name: Install Project Dependencies
        run: |
          npm install
      - name: Build Application
        run: |
          npm run build
      - name: Create Environment Variables
        run: |
          touch .env
          echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
          echo GUILD_ID = ${{ secrets.GUILD_ID }} >> .env
          echo CHANNEL_ID = ${{ secrets.CHANNEL_ID }} >> .env
          echo MODEL = ${{ secrets.MODEL }} >> .env
          echo CLIENT_UID = ${{ secrets.CLIENT_UID }} >> .env
          echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
          echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
          echo ADMINS = ${{ secrets.ADMINS }} >> .env
      # set -e ensures that if nohup fails, this step fails
      - name: Startup Discord Bot Client
        run: |
          set -e
          nohup npm run prod &
  Discord-Ollama-Container-Build: # test that the docker images build and run
    runs-on: ubuntu-latest
    timeout-minutes: 2
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
      - name: Set up Node Environment v18.18.2
        uses: actions/setup-node@v4
        with:
          node-version: 18.18.2
          cache: 'npm'
      - name: Create Environment Variables
        run: |
          touch .env
          echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
          echo GUILD_ID = ${{ secrets.GUILD_ID }} >> .env
          echo CHANNEL_ID = ${{ secrets.CHANNEL_ID }} >> .env
          echo MODEL = ${{ secrets.MODEL }} >> .env
          echo CLIENT_UID = ${{ secrets.CLIENT_UID }} >> .env
          echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
          echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
          echo ADMINS = ${{ secrets.ADMINS }} >> .env
      - name: Setup Docker Network and Images
        run: |
          npm run docker:start-cpu
      - name: Check Images Exist
        run: |
          (docker images | grep -q 'discord/bot' && docker images | grep -qE 'ollama/ollama') || exit 1
      - name: Check Containers Exist
        run: |
          (docker ps | grep -q 'ollama' && docker ps | grep -q 'discord') || exit 1

README.md

@@ -1,5 +1,26 @@
# Discord Ollama Integration [![License: CC BY-NC 4.0](https://img.shields.io/badge/License-CC_BY--NC_4.0-darkgreen.svg)](https://creativecommons.org/licenses/by-nc/4.0/) [![Release Badge](https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github)](https://github.com/kevinthedang/discord-ollama/releases/latest)
Ollama is an AI model management tool that allows users to install and use custom large language models locally. The goal is to create a Discord bot that will utilize Ollama and chat with it on Discord!
<div align="center">
    <p><a href="https://ollama.ai/"><img alt="ollama" src="./imgs/ollama-icon.png" width="200px" /></a><img alt="+" src="./imgs/grey-plus.png" width="100px" /><a href="https://discord.com/"><img alt="discord" src="./imgs/discord-icon.png" width="190px" /></a></p>
    <h1>Discord Ollama Integration</h1>
    <h3>Ollama as your Discord AI Assistant</h3>
    <p><a href="https://creativecommons.org/licenses/by-nc/4.0/"><img alt="License" src="https://img.shields.io/badge/License-CC_BY--NC_4.0-darkgreen.svg" /></a>
    <a href="https://github.com/kevinthedang/discord-ollama/releases/latest"><img alt="Release" src="https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github" /></a>
    <a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/build-test.yml"><img alt="Build Status" src="https://github.com/kevinthedang/discord-ollama/actions/workflows/build-test.yml/badge.svg" /></a>
</div>
## About/Goals
Ollama is an AI model management tool that allows users to install and use custom large language models locally.
The project aims to:
* [x] Create a Discord bot that will utilize Ollama to chat with users!
* [ ] User Preferences on Chat
* [ ] Message Persistence on Channels and Threads
* [x] Containerization with Docker
* [x] Slash Commands Compatible
* [ ] Generated Token Length Handling for >2000 or >6000 characters
* [ ] External WebUI Integration
* [ ] Administrator Role Compatible
* [ ] Allow others to create their own models personalized for their own servers!
* [ ] Documentation on creating your own LLM
* [ ] Documentation on web scraping and cleaning
## Environment Setup
* Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo.

docker-compose.yml

@@ -8,7 +8,7 @@ services:
    build: ./ # find docker file in designated path
    container_name: discord
    restart: always # always restart the container
    image: discord/bot:0.2.0
    image: discord/bot:0.3.5
    environment:
      CLIENT_TOKEN: ${CLIENT_TOKEN}
      GUILD_ID: ${GUILD_ID}
@@ -17,6 +17,7 @@ services:
      CLIENT_UID: ${CLIENT_UID}
      OLLAMA_IP: ${OLLAMA_IP}
      OLLAMA_PORT: ${OLLAMA_PORT}
      ADMINS: ${ADMINS}
    networks:
      ollama-net:
        ipv4_address: ${DISCORD_IP}
@@ -32,9 +33,9 @@ services:
      ollama-net:
        ipv4_address: ${OLLAMA_IP}
    # runtime: nvidia # use Nvidia Container Toolkit for GPU support
    # devices:
    #   - /dev/nvidia0
    runtime: nvidia # use Nvidia Container Toolkit for GPU support
    devices:
      - /dev/nvidia0
    volumes:
      - ollama:/root/.ollama
    ports:


@@ -2,6 +2,43 @@
* Follow this guide to setup [Docker](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04)
* If on Windows, download [Docker Desktop](https://docs.docker.com/desktop/install/windows-install/) to get the docker engine.
* Please also install [Docker Compose](https://docs.docker.com/compose/install/linux/) for easy running. If not, there are [scripts](#manual-run-with-docker) to set everything up.
* **IMPORTANT NOTE**: Currently, WSL does not seem to play well with the Nvidia Container Toolkit: it will work initially, then reset for some unknown reason. For now, it is advised to use an actual Linux machine when running with Docker. If you do not care about utilizing your GPU, or do not have an Nvidia GPU, disregard this.
## Nvidia Container Toolkit Setup
### Installation with Apt
* Instructions can be found [here](https://github.com/kevinthedang/discord-ollama/issues/23) in **Steps to reproduce** or below:
* Step 1. Configure the production repository on your machine:
```sh
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
&& curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
```
* Step 2. Update the packages list from the repository:
```sh
sudo apt-get update
```
* Step 3. Install the Nvidia Container Toolkit:
```sh
sudo apt-get install -y nvidia-container-toolkit
```
### Configuring with Docker
Step 1. Configure the container runtime by using the `nvidia-ctk` command:
```sh
sudo nvidia-ctk runtime configure --runtime=docker
```
The `nvidia-ctk` command modifies the `/etc/docker/daemon.json` file on the host. The file is updated so that Docker can use the NVIDIA Container Runtime.
Step 2. Restart the Docker daemon:
```sh
sudo systemctl restart docker
```
### References for setup
* Guide to installing [Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
* [GitHub repository](https://github.com/NVIDIA/nvidia-container-toolkit?tab=readme-ov-file) for Nvidia Container Toolkit
## To Run (with Docker and Docker Compose)
* With the inclusion of subnets in the `docker-compose.yml`, you will need to set the `SUBNET_ADDRESS`, `OLLAMA_IP`, `OLLAMA_PORT`, and `DISCORD_IP`. Here are some default values if you don't care:

BIN imgs/discord-icon.png: new binary file (94 KiB, not shown)

BIN imgs/grey-plus.png: new binary file (6.5 KiB, not shown)

BIN imgs/ollama-icon.png: new binary file (49 KiB, not shown)

package-lock.json (generated, 4 lines changed)

@@ -1,12 +1,12 @@
{
  "name": "discord-ollama",
  "version": "0.3.0",
  "version": "0.3.5",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "discord-ollama",
      "version": "0.3.0",
      "version": "0.3.5",
      "license": "ISC",
      "dependencies": {
        "axios": "^1.6.2",

package.json

@@ -1,6 +1,6 @@
{
  "name": "discord-ollama",
  "version": "0.3.0",
  "version": "0.3.5",
  "description": "Ollama Integration into discord",
  "main": "build/index.js",
  "exports": "./build/index.js",
@@ -10,14 +10,16 @@
    "build": "tsc",
    "prod": "node .",
    "client": "npm run build && npm run prod",
    "clean": "docker compose down && docker rmi $(docker images | grep 0.2.0 | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
    "clean": "docker compose down && docker rmi $(docker images | grep $(node -p \"require('./package.json').version\") | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
    "start": "docker compose build --no-cache && docker compose up -d",
    "docker:start": "npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama",
    "docker:start-cpu": "npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama-cpu",
    "docker:clean": "docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
    "docker:network": "docker network create --subnet=172.18.0.0/16 ollama-net",
    "docker:build": "docker build --no-cache -t discord/bot:0.2.0 .",
    "docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 discord",
    "docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest"
    "docker:build": "docker build --no-cache -t discord/bot:$(node -p \"require('./package.json').version\") .",
    "docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 discord/bot:$(node -p \"require('./package.json').version\")",
    "docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest",
    "docker:ollama-cpu": "docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest"
  },
  "author": "Kevin Dang",
  "license": "ISC",

src/index.ts

@@ -2,6 +2,7 @@ import { Client, GatewayIntentBits } from 'discord.js'
import { UserMessage, registerEvents } from './utils/events.js'
import Events from './events/index.js'
import { Ollama } from 'ollama'
import { Queue } from './queues/queue.js'
// Import keys/tokens
import Keys from './keys.js'
@@ -23,12 +24,7 @@ const ollama = new Ollama({
})

// Create Queue managed by Events
const messageHistory: [UserMessage] = [
    {
        role: 'system',
        content: 'Your name is Ollama GU'
    }
]
const messageHistory: Queue<UserMessage> = new Queue<UserMessage>()

/**
 * register events for bot to listen to in discord
@@ -44,4 +40,10 @@ await client.login(Keys.clientToken)
    .catch((error) => {
        console.error('[Login Error]', error)
        process.exit(1)
    })

// queue up the bot's name
messageHistory.enqueue({
    role: 'assistant',
    content: `My name is ${client.user?.username}`
})

src/commands/disable.ts (new file, 33 additions)

@@ -0,0 +1,33 @@
import { ChannelType, Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js'
import { SlashCommand } from '../utils/commands.js'
import { openFile } from '../utils/jsonHandler.js'

export const Disable: SlashCommand = {
    name: 'toggle-chat',
    description: 'toggle all chat features, slash commands will still work.',

    // set available user options to pass to the command
    options: [
        {
            name: 'enabled',
            description: 'true = enabled, false = disabled',
            type: ApplicationCommandOptionType.Boolean,
            required: true
        }
    ],

    // Toggle chat features on or off
    run: async (client: Client, interaction: CommandInteraction) => {
        // fetch channel and message
        const channel = await client.channels.fetch(interaction.channelId)
        if (!channel || channel.type !== ChannelType.GuildText) return

        // set state of bot chat features
        openFile('config.json', interaction.commandName, interaction.options.get('enabled')?.value)

        interaction.reply({
            content: `Chat features have been \`${interaction.options.get('enabled')?.value ? "enabled" : "disabled"}\``,
            ephemeral: true
        })
    }
}

src/commands/index.ts

@@ -2,9 +2,13 @@ import { SlashCommand } from '../utils/commands.js'
import { ThreadCreate } from './threadCreate.js'
import { MessageStyle } from './messageStyle.js'
import { MessageStream } from './messageStream.js'
import { Disable } from './disable.js'
import { Shutoff } from './shutoff.js'

export default [
    ThreadCreate,
    MessageStyle,
    MessageStream
    MessageStream,
    Disable,
    Shutoff
] as SlashCommand[]

src/commands/shutoff.ts (new file, 54 additions)

@@ -0,0 +1,54 @@
import { ChannelType, Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js'
import { SlashCommand } from '../utils/commands.js'
import Keys from '../keys.js'

export const Shutoff: SlashCommand = {
    name: 'shutoff',
    description: 'shutdown the bot. You will need to manually bring it online again.',

    // set available user options to pass to the command
    options: [
        {
            name: 'are-you-sure',
            description: 'true = yes, false = I\'m scared',
            type: ApplicationCommandOptionType.Boolean,
            required: true
        }
    ],

    // Confirm permissions, then shut the bot down
    run: async (client: Client, interaction: CommandInteraction) => {
        // fetch channel and message
        const channel = await client.channels.fetch(interaction.channelId)
        if (!channel || channel.type !== ChannelType.GuildText) return

        // log this, it will probably be important for auditing who did this
        console.log(`User -> ${interaction.user.tag} attempting to shutdown ${client.user!.tag}`)

        // create list of superUsers based on string parse
        const superUsers: string[] = JSON.parse(Keys.superUser.replace(/'/g, '"'))

        // check if admin or false on shutdown
        if (!superUsers.includes(interaction.user.tag)) {
            interaction.reply({
                content: `Shutdown failed:\n\n${interaction.user.tag}, you do not have permission to shutoff **${client.user?.tag}**.`,
                ephemeral: true
            })
            return // stop from shutting down
        } else if (!interaction.options.get('are-you-sure')?.value) {
            interaction.reply({
                content: `Shutdown failed:\n\n${interaction.user.tag}, you didn't want to shutoff **${client.user?.tag}**.`,
                ephemeral: true
            })
            return
        }

        interaction.reply({
            content: `${client.user?.tag} is ${interaction.options.get('are-you-sure')?.value ? "shutting down now." : "not shutting down."}`,
            ephemeral: true
        })

        // clean up client instance and stop
        client.destroy()
    }
}
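
A quick illustration of why `Array.prototype.includes` is the membership test wanted for the admin check above: JavaScript's `in` operator tests property keys (array indices), not element values, so a check like `tag in superUsers` silently fails.

```ts
// `in` on an array checks indices, not element values, so it is the
// wrong tool for a membership test; includes compares the values.
const superUsers = ['admin#1234']
console.log(superUsers.includes('admin#1234')) // true
console.log('admin#1234' in superUsers)        // false: no such index/key
console.log(0 in superUsers)                   // true: index 0 exists
```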

src/events/messageCreate.ts

@@ -1,6 +1,6 @@
import { ChatResponse } from 'ollama'
import { embedMessage, event, Events, normalMessage } from '../utils/index.js'
import { Configuration, getConfig } from '../utils/jsonHandler.js'
import { Configuration, getConfig, openFile } from '../utils/jsonHandler.js'
/**
* Max Message length for free users is 2000 characters (bot or not).
@@ -18,8 +18,11 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
    // Only respond if message mentions the bot
    if (!message.mentions.has(tokens.clientUid)) return

    // check if we can push, if not, remove oldest
    if (msgHist.size() === msgHist.getCapacity()) msgHist.dequeue()

    // push user response
    msgHist.push({
    msgHist.enqueue({
        role: 'user',
        content: message.content
    })
@@ -28,16 +31,23 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
    try {
        const config: Configuration = await new Promise((resolve, reject) => {
            getConfig('config.json', (config) => {
                // check if config.json exists
                if (config === undefined) {
                    reject(new Error('No Configuration is set up.'))
                    reject(new Error('No Configuration is set up.\n\nCreating \`config.json\` with \`message-style\` set as \`true\` for embedded messages.\nPlease try chatting again.'))
                    return
                }

                // check if chat is disabled
                if (!config.options['toggle-chat']) {
                    reject(new Error('Admin(s) have disabled chat features.\n\nPlease contact your server\'s admin(s).'))
                    return
                }
                resolve(config)
            })
        })

        let response: ChatResponse

        // undefined or false, use normal, otherwise use embed
        if (config.options['message-style'])
            response = await embedMessage(message, ollama, tokens, msgHist)
@@ -47,13 +57,17 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
        // If something bad happened, remove user query and stop
        if (response == undefined) { msgHist.pop(); return }

        // if queue is full, remove the oldest message
        if (msgHist.size() === msgHist.getCapacity()) msgHist.dequeue()

        // successful query, save it as history
        msgHist.push({
        msgHist.enqueue({
            role: 'assistant',
            content: response.message.content
        })
    } catch (error: any) {
        msgHist.pop() // remove message because of failure
        message.reply(`**Response generation failed.**\n\nReason: ${error.message}\n\nPlease use any config slash command.`)
        openFile('config.json', 'message-style', true)
        message.reply(`**Error Occurred:**\n\n**Reason:** *${error.message}*`)
    }
})
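
The config lookup above wraps the callback-style `getConfig` in a Promise so the handler can `await` it and surface failures through the surrounding `try/catch`. The same pattern as a standalone helper (`loadConfig` is a hypothetical name, not part of this diff; `Configuration` and `getConfig` are the jsonHandler imports shown above):

```ts
// Hypothetical helper (not in this diff) showing the Promise-wrapping
// pattern used above for the callback-style getConfig.
function loadConfig(path: string): Promise<Configuration> {
    return new Promise((resolve, reject) => {
        getConfig(path, (config) => {
            if (config === undefined) {
                reject(new Error('No Configuration is set up.'))
                return
            }
            resolve(config)
        })
    })
}
```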

src/keys.ts

@@ -7,7 +7,8 @@ export const Keys = {
    clientUid: getEnvVar('CLIENT_UID'),
    guildId: getEnvVar('GUILD_ID'),
    ipAddress: getEnvVar('OLLAMA_IP'),
    portAddress: getEnvVar('OLLAMA_PORT')
    portAddress: getEnvVar('OLLAMA_PORT'),
    superUser: getEnvVar('ADMINS')
} as const // readonly keys

export default Keys

src/queues/queue.ts (new file, 70 additions)

@@ -0,0 +1,70 @@
// Queue interfaces for any queue class to follow
interface IQueue<T> {
    enqueue(item: T): void
    dequeue(): T | undefined
    size(): number
}

/**
 * Queue for UserMessages
 * When the limit for messages is met, we want to clear
 * out the oldest message in the queue
 */
export class Queue<T> implements IQueue<T> {
    private storage: T[] = []

    /**
     * Set up Queue
     * @param capacity max length of queue
     */
    constructor(private capacity: number = 5) {}

    /**
     * Add an item to the back of the queue
     * @param item object of type T to add into queue
     */
    enqueue(item: T): void {
        if (this.size() === this.capacity)
            throw Error('Queue has reached max capacity, you cannot add more items.')
        this.storage.push(item)
    }

    /**
     * Remove the item at the front of the queue
     * @returns object of type T in queue
     */
    dequeue(): T | undefined {
        return this.storage.shift()
    }

    /**
     * Size of the queue
     * @returns length of queue as an int/number
     */
    size(): number {
        return this.storage.length
    }

    /**
     * Remove the most recently added item, typically for errors
     */
    pop(): void {
        this.storage.pop()
    }

    /**
     * Get the queue as an array
     * @returns an array of T items
     */
    getItems(): T[] {
        return this.storage
    }

    /**
     * Get capacity of the queue
     * @returns capacity of queue
     */
    getCapacity(): number {
        return this.capacity
    }
}
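
A minimal usage sketch of this queue as the chat history, following the dequeue-before-enqueue pattern in the messageCreate handler above (the `UserMessage` shape is taken from `src/utils/events.ts` below):

```ts
import { Queue } from './queue.js'

type UserMessage = { role: string, content: string } // shape per src/utils/events.ts

const msgHist = new Queue<UserMessage>(5)

// the handler drops the oldest entry before enqueueing,
// so enqueue never trips the capacity guard
if (msgHist.size() === msgHist.getCapacity()) msgHist.dequeue()
msgHist.enqueue({ role: 'user', content: 'hello there' })

console.log(msgHist.getItems().length) // 1
```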

src/utils/events.ts

@@ -1,5 +1,6 @@
import type { ClientEvents, Awaitable, Client } from 'discord.js'
import type { ClientEvents, Awaitable, Client, User } from 'discord.js'
import { Ollama } from 'ollama'
import { Queue } from '../queues/queue.js'
// Export events through here to reduce amount of imports
export { Events } from 'discord.js'
@@ -33,7 +34,7 @@ export type UserMessage = {
export interface EventProps {
    client: Client
    log: LogMethod
    msgHist: { role: string, content: string }[]
    msgHist: Queue<UserMessage>
    tokens: Tokens,
    ollama: Ollama
}
@@ -63,7 +64,7 @@ export function event<T extends EventKeys>(key: T, callback: EventCallback<T>):
export function registerEvents(
    client: Client,
    events: Event[],
    msgHist: UserMessage[],
    msgHist: Queue<UserMessage>,
    tokens: Tokens,
    ollama: Ollama
): void {

src/utils/jsonHandler.ts

@@ -4,7 +4,8 @@ export interface Configuration {
    readonly name: string
    options: {
        'message-stream'?: boolean,
        'message-style'?: boolean
        'message-style'?: boolean,
        'toggle-chat'?: boolean
    }
}

src/utils/messageEmbed.ts

@@ -1,6 +1,7 @@
import { EmbedBuilder, Message } from 'discord.js'
import { ChatResponse, Ollama } from 'ollama'
import { UserMessage } from './events.js'
import { Queue } from '../queues/queue.js'
/**
 * Method to send replies as embedded messages on Discord
@@ -15,7 +16,7 @@ export async function embedMessage(
        channel: string,
        model: string
    },
    msgHist: UserMessage[]
    msgHist: Queue<UserMessage>
) {
    // bot response
    let response: ChatResponse
@@ -33,7 +34,7 @@
    // Attempt to query model for message
    response = await ollama.chat({
        model: tokens.model,
        messages: msgHist,
        messages: msgHist.getItems(),
        options: {
            num_thread: 8, // remove if optimization needed further
            mirostat: 1,

src/utils/messageNormal.ts

@@ -1,6 +1,7 @@
import { Message } from 'discord.js'
import { ChatResponse, Ollama } from 'ollama'
import { UserMessage } from './events.js'
import { Queue } from '../queues/queue.js'
/**
* Method to send replies as normal text on discord like any other user
@@ -15,7 +16,7 @@ export async function normalMessage(
        channel: string,
        model: string
    },
    msgHist: UserMessage[]
    msgHist: Queue<UserMessage>
) {
    // bot's response
    let response: ChatResponse
@@ -25,7 +26,7 @@
    // Attempt to query model for message
    response = await ollama.chat({
        model: tokens.model,
        messages: msgHist,
        messages: msgHist.getItems(),
        options: {
            num_thread: 8, // remove if optimization needed further
            mirostat: 1,