Compare commits

...

15 Commits

Author SHA1 Message Date
snyk-bot
e07f10506a fix: upgrade dotenv from 16.4.7 to 16.5.0
Snyk has created this PR to upgrade dotenv from 16.4.7 to 16.5.0.

See this package in npm:
dotenv

See this project in Snyk:
https://app.snyk.io/org/jt2m0l3y/project/d8b070a3-e4a3-457a-977b-7eb6a4a48346?utm_source=github&utm_medium=referral&page=upgrade-pr
2025-05-08 08:18:40 +00:00
Kevin Dang
a5faca87aa Fix: missing model env for docker (#172) 2025-04-18 19:42:18 -07:00
Jonathan Smoley
4c96b3863a Upgrade Dependencies (#164) 2025-03-28 10:00:50 -07:00
Kevin Dang
40783818b9 Upgrade Npm Packages (#159)
* Update: upgrade packages

* Update: add in all packages

* Update: fix whitespace in events

---------

Co-authored-by: JT2M0L3Y <jtsmoley@icloud.com>
2025-02-23 21:00:53 -07:00
Kevin Dang
ed0d8600df Deploy Badge (#163) 2025-02-22 15:23:01 -08:00
Kevin Dang
03939ef562 Server Deployment Scripts (#162) 2025-02-22 14:06:14 -08:00
Jonathan Smoley
456f70b9e1 Deprecated ephemeral field (#158)
* Update: ephemeral flag added in place of field

* Update: remove unused import

* Update: version increment

---------

Co-authored-by: Kevin Dang <kevinthedang_1@outlook.com>
2025-02-02 15:10:58 -08:00
Jonathan Smoley
5b542aca1a [Snyk] Upgrade discord.js from 14.16.3 to 14.17.3 (#155) 2025-01-31 16:23:31 -08:00
Kevin Dang
2a39e20fee Text Files As Prompts (#156)
* Add: .txt file reading

* Update: version increment
2025-01-31 14:12:11 -08:00
Jonathan Smoley
2ea77c92f0 Prepare Redis Environment (#133)
* add redis container

* Updated Guides and Goals  (#134)

* Update README.md

* Update commands-guide.md

* Update events-guide.md

* Update commands-guide.md

* Added: redis client

* Fixed: redis mock in commands.test.ts

* Updated: npm package patches

* Fixed: redis ip name in keys.ts

* update Node LTS version, workflow env vars

* Updated: node package engine requirements

* Updated: documentation

* fix: upgrade dotenv from 16.4.5 to 16.4.7 (#152)

Snyk has created this PR to upgrade dotenv from 16.4.5 to 16.4.7.

See this package in npm:
dotenv

See this project in Snyk:
https://app.snyk.io/org/jt2m0l3y/project/d8b070a3-e4a3-457a-977b-7eb6a4a48346?utm_source=github&utm_medium=referral&page=upgrade-pr

Co-authored-by: snyk-bot <snyk-bot@snyk.io>

* Update: docs patches, connection ordering

---------

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2024-12-30 15:53:29 -08:00
Kevin Dang
6c7e48d369 Delete Model Command (#150)
* Add: Delete Model Command

* Update: version increment

* Update: new command to tests
2024-12-14 17:06:08 -08:00
Kevin Dang
fe1f7ce5ec Remove Message Style Command (#149)
* Remove: Message Style Command

* Update: version increment
2024-12-13 16:55:57 -08:00
Kevin Dang
6ac45afb13 Streamlined Preferences Setup and Default Model (#148)
* Update: Streamlinded setup and Default Model

* Update: version increment
2024-12-11 17:53:35 -08:00
Kevin Dang
d570a50d46 Pull and Switch Model Revised (#142)
* Update: pull-model only runnable by admins now

* Update: switch-model cannot pull models anymore

* Update: less technical responses

* Update: version increment
2024-12-04 21:29:01 -08:00
Kevin Dang
1c8449d578 Code Owners File (#140)
* Add: codeowners file

* Fix: Spelling error
2024-11-23 14:51:17 -08:00
46 changed files with 1319 additions and 1453 deletions

View File

@@ -1,6 +1,9 @@
# Discord token for the bot # Discord token for the bot
CLIENT_TOKEN = BOT_TOKEN CLIENT_TOKEN = BOT_TOKEN
# Default model for new users
MODEL = DEFAULT_MODEL
# ip/port address of docker container, I use 172.18.0.3 for docker, 127.0.0.1 for local # ip/port address of docker container, I use 172.18.0.3 for docker, 127.0.0.1 for local
OLLAMA_IP = IP_ADDRESS OLLAMA_IP = IP_ADDRESS
OLLAMA_PORT = PORT OLLAMA_PORT = PORT
@@ -10,3 +13,7 @@ DISCORD_IP = IP_ADDRESS
# subnet address, ex. 172.18.0.0 as we use /16. # subnet address, ex. 172.18.0.0 as we use /16.
SUBNET_ADDRESS = ADDRESS SUBNET_ADDRESS = ADDRESS
# redis port and ip, default redis port is 6379
REDIS_IP = IP_ADDRESS
REDIS_PORT = PORT
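The new `MODEL`, `REDIS_IP`, and `REDIS_PORT` entries only take effect if something reads them at startup. Below is a minimal sketch of a dotenv-backed keys module that could expose them; the property names mirror the `Keys.defaultModel`, `Keys.redisHost`, and `Keys.redisPort` references that appear in the client startup diff further down, but the exact contents of this repository's `keys.ts` are an assumption here.

```typescript
import 'dotenv/config'

// Sketch only: the real keys module in this repository may differ.
function required(name: string): string {
    const value = process.env[name]
    if (value === undefined) throw new Error(`Missing environment variable: ${name}`)
    return value
}

const Keys = {
    clientToken: required('CLIENT_TOKEN'),
    ipAddress: required('OLLAMA_IP'),
    portAddress: required('OLLAMA_PORT'),
    defaultModel: required('MODEL'),   // added in this change set
    redisHost: required('REDIS_IP'),   // added in this change set
    redisPort: required('REDIS_PORT')  // added in this change set
} as const

export default Keys
```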

View File

@@ -13,10 +13,10 @@ jobs:
- name: Checkout Repository - name: Checkout Repository
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Set up Node Environment lts/hydrogen - name: Set up Node Environment lts/jod
uses: actions/setup-node@v4 uses: actions/setup-node@v4
with: with:
node-version: lts/hydrogen node-version: lts/jod
cache: "npm" cache: "npm"
- name: Install Project Dependencies - name: Install Project Dependencies
@@ -33,6 +33,9 @@ jobs:
echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
# set -e ensures if nohup fails, this section fails # set -e ensures if nohup fails, this section fails
- name: Startup Discord Bot Client - name: Startup Discord Bot Client
@@ -47,10 +50,10 @@ jobs:
- name: Checkout Repository - name: Checkout Repository
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Set up Node Environment lts/hydrogen - name: Set up Node Environment lts/jod
uses: actions/setup-node@v4 uses: actions/setup-node@v4
with: with:
node-version: lts/hydrogen node-version: lts/jod
cache: "npm" cache: "npm"
- name: Create Environment Variables - name: Create Environment Variables
@@ -59,6 +62,9 @@ jobs:
echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Setup Docker Network and Images - name: Setup Docker Network and Images
run: | run: |
@@ -66,8 +72,8 @@ jobs:
- name: Check Images Exist - name: Check Images Exist
run: | run: |
(docker images | grep -q 'kevinthedang/discord-ollama' && docker images | grep -qE 'ollama/ollama') || exit 1 (docker images | grep -q 'kevinthedang/discord-ollama' && docker images | grep -qE 'ollama/ollama' && docker images | grep -qE 'redis') || exit 1
- name: Check Containers Exist - name: Check Containers Exist
run: | run: |
(docker ps | grep -q 'ollama' && docker ps | grep -q 'discord') || exit 1 (docker ps | grep -q 'ollama' && docker ps | grep -q 'discord' && docker ps | grep -q 'redis') || exit 1

View File

@@ -14,10 +14,10 @@ jobs:
- name: Checkout Repository - name: Checkout Repository
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Set up Node Environment lts/hydrogen - name: Set up Node Environment lts/jod
uses: actions/setup-node@v4 uses: actions/setup-node@v4
with: with:
node-version: lts/hydrogen node-version: lts/jod
cache: "npm" cache: "npm"
- name: Install Project Dependencies - name: Install Project Dependencies
@@ -30,6 +30,9 @@ jobs:
echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Collect Code Coverage - name: Collect Code Coverage
run: | run: |

.github/workflows/deploy.yml (vendored, new file: 133 lines added)
View File

@@ -0,0 +1,133 @@
name: Deploy
run-name: Deploy Application Latest
on:
push:
tags:
- 'v*'
jobs:
Deploy-Application:
runs-on: self-hosted
environment: deploy
timeout-minutes: 5
steps:
- name: Checkout Repo
uses: actions/checkout@v4
# Generate Secret File for Compose case
- name: Create Environment Variables
run: |
touch .env
echo CLIENT_TOKEN = ${{ secrets.CLIENT }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo DISCORD_IP = ${{ secrets.DISCORD_IP }} >> .env
echo SUBNET_ADDRESS = ${{ secrets.SUBNET_ADDRESS }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Check if directory exists and delete it
run: |
if [ -d "${{ secrets.PATH }}" ]; then
echo "Directory exists, deleting old version..."
rm -rf ${{ secrets.PATH }}
else
echo "Directory does not exist."
fi
- name: Clone Repo onto Server
run: |
git clone https://github.com/kevinthedang/discord-ollama.git ${{ secrets.PATH }}
cd ${{ secrets.PATH }}
- name: Install nvm and Node.js lts/jod
run: |
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
echo "NVM installed successfully."
nvm install lts/jod
nvm alias default lts/jod
node -v
npm -v
- name: Build Application
run: |
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
npm install
IMAGE="kevinthedang/discord-ollama"
REDIS="redis"
OLLAMA="ollama/ollama"
if docker images | grep -q $IMAGE; then
IMAGE_ID=$(docker images -q $IMAGE)
CONTAINER_IDS=$(docker ps -q --filter "ancestor=$IMAGE_ID")
if [ ! -z "$CONTAINER_IDS" ]; then
# Stop and remove the running containers
docker stop $CONTAINER_IDS
echo "Stopped and removed the containers using the image $IMAGE"
fi
docker rmi $IMAGE_ID
echo "Old $IMAGE Image Removed"
fi
if docker images | grep -q $REDIS; then
IMAGE_ID=$(docker images -q $REDIS)
CONTAINER_IDS=$(docker ps -q --filter "ancestor=$IMAGE_ID")
if [ ! -z "$CONTAINER_IDS" ]; then
# Stop and remove the running containers
docker stop $CONTAINER_IDS
echo "Stopped and removed the containers using the image $REDIS"
fi
docker rmi $IMAGE_ID
echo "Old $REDIS Image Removed"
fi
if docker images | grep -q $OLLAMA; then
IMAGE_ID=$(docker images -q $OLLAMA)
CONTAINER_IDS=$(docker ps -q --filter "ancestor=$IMAGE_ID")
if [ ! -z "$CONTAINER_IDS" ]; then
# Stop and remove the running containers
docker stop $CONTAINER_IDS
echo "Stopped and removed the containers using the image $OLLAMA"
fi
docker rmi $IMAGE_ID
echo "Old $OLLAMA Image Removed"
fi
docker network prune -f
docker system prune -a -f
npm run docker:build-latest
- name: Start Application
run: |
docker network create --subnet=${{ secrets.SUBNET_ADDRESS }}/16 ollama-net || true
docker run --rm -d \
-v ollama:/root/.ollama \
-p ${{ secrets.OLLAMA_PORT }}:${{ secrets.OLLAMA_PORT }} \
--name ollama \
--network ollama-net \
--ip ${{ secrets.OLLAMA_IP }} \
ollama/ollama:latest
docker run --rm -d \
-v redis:/root/.redis \
-p ${{ secrets.REDIS_PORT }}:${{ secrets.REDIS_PORT }} \
--name redis \
--network ollama-net \
--ip ${{ secrets.REDIS_IP }} \
redis:latest
docker run --rm -d \
-v discord:/src/app \
--name discord \
--network ollama-net \
--ip ${{ secrets.DISCORD_IP }} \
kevinthedang/discord-ollama

View File

@@ -1,48 +0,0 @@
name: Deploy
run-name: Release Docker Image
on:
push:
tags:
- 'v*'
jobs:
Release-Docker-Image:
runs-on: ubuntu-latest
environment: release
timeout-minutes: 3
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Set up Node Environment lts/hydrogen
uses: actions/setup-node@v4
with:
node-version: lts/hydrogen
cache: "npm"
- name: Create Environment Variables
run: |
touch .env
echo CLIENT_TOKEN = NOT_REAL_TOKEN >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
- name: Get Version from package.json
run: echo "VERSION=$(jq -r '.version' package.json)" >> $GITHUB_ENV
- name: Build Image
run: |
npm run docker:build
- name: Build Image as Latest
run: |
npm run docker:build-latest
- name: Log into Docker
run: |
docker login --username ${{ vars.DOCKER_USER }} --password ${{ secrets.DOCKER_PASS }}
- name: Release Docker Image
run: |
docker push ${{ vars.DOCKER_USER }}/discord-ollama:${{ env.VERSION }}
docker push ${{ vars.DOCKER_USER }}/discord-ollama:latest

View File

@@ -25,10 +25,10 @@ jobs:
- name: Checkout Repository - name: Checkout Repository
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Set up Node Environment lts/hydrogen - name: Set up Node Environment lts/jod
uses: actions/setup-node@v4 uses: actions/setup-node@v4
with: with:
node-version: lts/hydrogen node-version: lts/jod
cache: "npm" cache: "npm"
- name: Install Project Dependencies - name: Install Project Dependencies
@@ -41,6 +41,9 @@ jobs:
echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Test Application - name: Test Application
run: | run: |

CODEOWNERS (new file: 24 lines added)
View File

@@ -0,0 +1,24 @@
# The further down a rule appears in this file, the more precedence it has.
# This is to make sure the right people review the relevant changes.
# Last Edited: 11/23/2024
# Author: Kevin Dang
# These owners will be the default owners
# for everything in the repo, but only for files
# not covered by the more specific ownership
# rules declared below.
* @kevinthedang @JT2M0L3Y
# Technical/Business Code Ownership
/src/ @kevinthedang @JT2M0L3Y
/tests/ @JT2M0L3Y
/.github/ @kevinthedang
# Docker Ownership
Dockerfile @kevinthedang
docker-compose.yml @kevinthedang
# Documentation Ownership
/docs/ @kevinthedang
/imgs/ @kevinthedang

View File

@@ -1,5 +1,5 @@
# use node LTS image for version 18 # use node LTS image for version 22
FROM node:hydrogen-alpine FROM node:jod-alpine
# set working directory inside container # set working directory inside container
WORKDIR /app WORKDIR /app

View File

@@ -5,7 +5,7 @@
<p><a href="#"></a><a href="https://creativecommons.org/licenses/by/4.0/"><img alt="License" src="https://img.shields.io/badge/License-CC_BY_4.0-darkgreen.svg" /></a> <p><a href="#"></a><a href="https://creativecommons.org/licenses/by/4.0/"><img alt="License" src="https://img.shields.io/badge/License-CC_BY_4.0-darkgreen.svg" /></a>
<a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/releases/latest"><img alt="Release" src="https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github" /></a> <a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/releases/latest"><img alt="Release" src="https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github" /></a>
<a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/build.yml"><img alt="Build Status" src="https://github.com/kevinthedang/discord-ollama/actions/workflows/build.yml/badge.svg" /></a> <a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/build.yml"><img alt="Build Status" src="https://github.com/kevinthedang/discord-ollama/actions/workflows/build.yml/badge.svg" /></a>
<a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/release.yml"><img alt="Release Status" src="https://github.com/kevinthedang/discord-ollama/actions/workflows/release.yml/badge.svg" /></a> <a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/deploy.yml"><img alt="Deploy Status" src="https://github.com/kevinthedang/discord-ollama/actions/workflows/deploy.yml/badge.svg" /></a>
<a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/test.yml"><img alt="Testing Status" src="https://github.com/kevinthedang/discord-ollama/actions/workflows/test.yml/badge.svg" /></a> <a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/test.yml"><img alt="Testing Status" src="https://github.com/kevinthedang/discord-ollama/actions/workflows/test.yml/badge.svg" /></a>
<a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/coverage.yml"><img alt="Code Coverage" src="https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/kevinthedang/bc7b5dcfa16561ab02bb3df67a99b22d/raw/coverage.json"></a> <a href="#"></a><a href="https://github.com/kevinthedang/discord-ollama/actions/workflows/coverage.yml"><img alt="Code Coverage" src="https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/kevinthedang/bc7b5dcfa16561ab02bb3df67a99b22d/raw/coverage.json"></a>
</div> </div>
@@ -27,12 +27,13 @@ The project aims to:
* [x] Administrator Role Compatible * [x] Administrator Role Compatible
* [x] Multi-User Chat Generation (Multiple users chatting at the same time) - This was built in from Ollama `v0.2.1+` * [x] Multi-User Chat Generation (Multiple users chatting at the same time) - This was built in from Ollama `v0.2.1+`
* [x] Automatic and Manual model pulling through the Discord client * [x] Automatic and Manual model pulling through the Discord client
* [ ] Allow others to create their own models personalized for their own servers!
* [ ] Documentation on creating your own LLM Further, Ollama lets you use custom models or provide extra context on top of any model available through the Ollama model library.
* [ ] Documentation on web scrapping and cleaning * [Customize a model](https://github.com/ollama/ollama#customize-a-model)
* [Modelfile Docs](https://github.com/ollama/ollama/blob/main/docs/modelfile.md)
## Documentation ## Documentation
These are guides to the feature set included and the events triggered in this app. These are guides to the features and capabilities of this app.
* [User Slash Commands](./docs/commands-guide.md) * [User Slash Commands](./docs/commands-guide.md)
* [Client Events](./docs/events-guide.md) * [Client Events](./docs/events-guide.md)
@@ -49,10 +50,12 @@ These are guides to the feature set included and the events triggered in this ap
## Resources ## Resources
* [NodeJS](https://nodejs.org/en) * [NodeJS](https://nodejs.org/en)
* This project runs on `lts\hydrogen`. * This project runs on `lts/jod` and above.
* This project supports any NodeJS version above `16.x.x` to only allow ESModules. * This project requires the use of npm version `10.9.0` or above.
* [Ollama](https://ollama.com/) * [Ollama](https://ollama.com/)
* [Ollama Docker Image](https://hub.docker.com/r/ollama/ollama) * [Ollama Docker Image](https://hub.docker.com/r/ollama/ollama)
* [Redis](https://redis.io/)
* [Redis Docker Image](https://hub.docker.com/_/redis)
* [Discord.js Docs](https://discord.js.org/docs/packages/discord.js/main) * [Discord.js Docs](https://discord.js.org/docs/packages/discord.js/main)
* [Setting up Docker (Ubuntu 20.04)](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04) * [Setting up Docker (Ubuntu 20.04)](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04)
* [Setting up Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) * [Setting up Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)

View File

@@ -7,11 +7,14 @@ services:
build: ./ # find docker file in designated path build: ./ # find docker file in designated path
container_name: discord container_name: discord
restart: always # rebuild container always restart: always # rebuild container always
image: kevinthedang/discord-ollama:0.7.2 image: kevinthedang/discord-ollama:0.8.4
environment: environment:
CLIENT_TOKEN: ${CLIENT_TOKEN} CLIENT_TOKEN: ${CLIENT_TOKEN}
OLLAMA_IP: ${OLLAMA_IP} OLLAMA_IP: ${OLLAMA_IP}
OLLAMA_PORT: ${OLLAMA_PORT} OLLAMA_PORT: ${OLLAMA_PORT}
MODEL: ${MODEL}
REDIS_IP: ${REDIS_IP}
REDIS_PORT: ${REDIS_PORT}
networks: networks:
ollama-net: ollama-net:
ipv4_address: ${DISCORD_IP} ipv4_address: ${DISCORD_IP}
@@ -26,7 +29,6 @@ services:
networks: networks:
ollama-net: ollama-net:
ipv4_address: ${OLLAMA_IP} ipv4_address: ${OLLAMA_IP}
runtime: nvidia # use Nvidia Container Toolkit for GPU support runtime: nvidia # use Nvidia Container Toolkit for GPU support
devices: devices:
- /dev/nvidia0 - /dev/nvidia0
@@ -35,7 +37,18 @@ services:
ports: ports:
- ${OLLAMA_PORT}:${OLLAMA_PORT} - ${OLLAMA_PORT}:${OLLAMA_PORT}
# Put Redis Container here? # setup redis container
redis:
image: redis:latest
container_name: redis
restart: always
networks:
ollama-net:
ipv4_address: ${REDIS_IP}
volumes:
- redis:/root/.redis
ports:
- ${REDIS_PORT}:${REDIS_PORT}
# create a network that supports giving addresses within a specific subnet # create a network that supports giving addresses within a specific subnet
networks: networks:
@@ -49,3 +62,4 @@ networks:
volumes: volumes:
ollama: ollama:
discord: discord:
redis:

View File

@@ -78,8 +78,7 @@ This is a guide to all of the slash commands for the app.
``` ```
/message-stream stream true /message-stream stream true
``` ```
> [!NOTE] **This is very slow on Discord because "spamming" changes in a channel within a period of 5 seconds is not allowed.**
> This is a very slow progress on Discord because "spamming" changes within 5 seconds is not allowed.
3. Message Style 3. Message Style
This command allows a user to select whether to embed the app's response. This command allows a user to select whether to embed the app's response.

View File

@@ -1,7 +1,9 @@
## Events Guide ## Events Guide
This is a guide to all of the client events for the app. This is a guide to all of the client events for the app.
> [!NOTE] Each of these is logged to the console for a developer to track. > [!NOTE]
> * Each of these is logged to the console for a developer to track.
> * Possible interactions include commands, buttons, menus, etc.
1. ClientReady 1. ClientReady
This event signifies that the Discord app is online. This event signifies that the Discord app is online.
@@ -11,8 +13,6 @@ This is a guide to all of the client events for the app.
This event signifies that a user interacted from Discord in some way. This event signifies that a user interacted from Discord in some way.
Here commands are selected from a knowledge bank and executed if found. Here commands are selected from a knowledge bank and executed if found.
> [!NOTE] Possible interactions include commands, buttons, menus, etc.
3. MessageCreate 3. MessageCreate
This event signifies that a message was sent. This event signifies that a message was sent.
Here user questions and comments for the LLM are processed. Here user questions and comments for the LLM are processed.
@@ -24,4 +24,4 @@ This is a guide to all of the client events for the app.
4. ThreadDelete 4. ThreadDelete
This event signifies that a Discord Thread was deleted. This event signifies that a Discord Thread was deleted.
Here any preferences set for interaction within the thread are cleared away. Here any preferences set for interaction within the thread are cleared away.

View File

@@ -43,11 +43,13 @@ sudo systemctl restart docker
* [GitHub repository](https://github.com/NVIDIA/nvidia-container-toolkit?tab=readme-ov-file) for Nvidia Container Toolkit * [GitHub repository](https://github.com/NVIDIA/nvidia-container-toolkit?tab=readme-ov-file) for Nvidia Container Toolkit
## To Run (with Docker and Docker Compose) ## To Run (with Docker and Docker Compose)
* With the inclusion of subnets in the `docker-compose.yml`, you will need to set the `SUBNET_ADDRESS`, `OLLAMA_IP`, `OLLAMA_PORT`, and `DISCORD_IP`. Here are some default values if you don't care: * With the inclusion of subnets in the `docker-compose.yml`, you will need to set the `SUBNET_ADDRESS`, `OLLAMA_IP`, `OLLAMA_PORT`, `REDIS_IP`, `REDIS_PORT`, and `DISCORD_IP`. Here are some default values if you don't care:
* `SUBNET_ADDRESS = 172.18.0.0`
* `OLLAMA_IP = 172.18.0.2` * `OLLAMA_IP = 172.18.0.2`
* `OLLAMA_PORT = 11434` * `OLLAMA_PORT = 11434`
* `DISCORD_IP = 172.18.0.3` * `DISCORD_IP = 172.18.0.3`
* `SUBNET_ADDRESS = 172.18.0.0` * `REDIS_IP = 172.18.0.4`
* `REDIS_PORT = 6379`
* Don't understand any of this? Watch a networking video to understand subnetting. * Don't understand any of this? Watch a networking video to understand subnetting.
* You also need all environment variables shown in [`.env.sample`](../.env.sample) * You also need all environment variables shown in [`.env.sample`](../.env.sample)
* Otherwise, there is no need to install any npm packages for this, you just need to run `npm run start` to pull the containers and spin them up. * Otherwise, there is no need to install any npm packages for this, you just need to run `npm run start` to pull the containers and spin them up.

package-lock.json (generated: 1456 lines changed)
File diff suppressed because it is too large.

View File

@@ -1,6 +1,6 @@
{ {
"name": "discord-ollama", "name": "discord-ollama",
"version": "0.7.2", "version": "0.8.4",
"description": "Ollama Integration into discord", "description": "Ollama Integration into discord",
"main": "build/index.js", "main": "build/index.js",
"exports": "./build/index.js", "exports": "./build/index.js",
@@ -13,33 +13,36 @@
"client": "npm run build && npm run prod", "client": "npm run build && npm run prod",
"clean": "docker compose down && docker rmi $(docker images | grep kevinthedang | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)", "clean": "docker compose down && docker rmi $(docker images | grep kevinthedang | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"start": "docker compose build --no-cache && docker compose up -d", "start": "docker compose build --no-cache && docker compose up -d",
"docker:clean": "docker rm -f discord && docker rm -f ollama && docker network prune -f && docker rmi $(docker images | grep kevinthedang | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)", "docker:clean": "docker rm -f discord && docker rm -f ollama && docker rm -f redis && docker network prune -f && docker rmi $(docker images | grep kevinthedang | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"docker:network": "docker network create --subnet=172.18.0.0/16 ollama-net", "docker:network": "docker network create --subnet=172.18.0.0/16 ollama-net",
"docker:build": "docker build --no-cache -t kevinthedang/discord-ollama:$(node -p \"require('./package.json').version\") .", "docker:build": "docker build --no-cache -t kevinthedang/discord-ollama:$(node -p \"require('./package.json').version\") .",
"docker:build-latest": "docker build --no-cache -t kevinthedang/discord-ollama:latest .", "docker:build-latest": "docker build --no-cache -t kevinthedang/discord-ollama:latest .",
"docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 kevinthedang/discord-ollama:$(node -p \"require('./package.json').version\")", "docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 kevinthedang/discord-ollama:$(node -p \"require('./package.json').version\")",
"docker:redis": "docker run -d -v redis:/root/.redis -p 6379:6379 --name redis --network ollama-net --ip 172.18.0.4 redis:latest",
"docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest", "docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest",
"docker:ollama-cpu": "docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest", "docker:ollama-cpu": "docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest",
"docker:start": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama", "docker:start": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:redis && npm run docker:client && npm run docker:ollama",
"docker:start-cpu": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama-cpu" "docker:start-cpu": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:redis && npm run docker:client && npm run docker:ollama-cpu"
}, },
"author": "Kevin Dang", "author": "Kevin Dang",
"license": "ISC", "license": "ISC",
"dependencies": { "dependencies": {
"discord.js": "^14.16.3", "discord.js": "^14.18.0",
"dotenv": "^16.4.5", "dotenv": "^16.5.0",
"ollama": "^0.5.9" "ollama": "^0.5.14",
"redis": "^4.7.0"
}, },
"devDependencies": { "devDependencies": {
"@types/node": "^22.9.0", "@types/node": "^22.13.14",
"@vitest/coverage-v8": "^2.1.4", "@vitest/coverage-v8": "^3.0.9",
"ts-node": "^10.9.2", "ts-node": "^10.9.2",
"tsx": "^4.19.2", "tsx": "^4.19.3",
"typescript": "^5.6.3", "typescript": "^5.8.2",
"vitest": "^2.1.4" "vitest": "^3.0.4"
}, },
"type": "module", "type": "module",
"engines": { "engines": {
"node": ">=16.0.0" "npm": ">=10.9.0",
"node": ">=22.12.0"
} }
} }

View File

@@ -1,42 +1,55 @@
import { Client, GatewayIntentBits } from 'discord.js' import { Client, GatewayIntentBits } from 'discord.js'
import { Ollama } from 'ollama' import { Ollama } from 'ollama'
import { Queue } from './queues/queue.js' import { createClient } from 'redis'
import { UserMessage, registerEvents } from './utils/index.js' import { Queue } from './queues/queue.js'
import Events from './events/index.js' import { UserMessage, registerEvents } from './utils/index.js'
import Keys from './keys.js' import Events from './events/index.js'
import Keys from './keys.js'
// initialize the client with the following permissions when logging in // initialize the client with the following permissions when logging in
const client = new Client({ const client = new Client({
intents: [ intents: [
GatewayIntentBits.Guilds, GatewayIntentBits.Guilds,
GatewayIntentBits.GuildMembers, GatewayIntentBits.GuildMembers,
GatewayIntentBits.GuildMessages, GatewayIntentBits.GuildMessages,
GatewayIntentBits.MessageContent GatewayIntentBits.MessageContent
] ]
}); })
// initialize connection to ollama container // initialize connection to redis
export const ollama = new Ollama({ const redis = createClient({
host: `http://${Keys.ipAddress}:${Keys.portAddress}`, url: `redis://${Keys.redisHost}:${Keys.redisPort}`,
}) })
// Create Queue managed by Events // initialize connection to ollama container
const messageHistory: Queue<UserMessage> = new Queue<UserMessage> export const ollama = new Ollama({
host: `http://${Keys.ipAddress}:${Keys.portAddress}`,
// register all events })
registerEvents(client, Events, messageHistory, ollama)
// Create Queue managed by Events
// Try to log in the client const messageHistory: Queue<UserMessage> = new Queue<UserMessage>
await client.login(Keys.clientToken)
.catch((error) => { // register all events
console.error('[Login Error]', error) registerEvents(client, Events, messageHistory, ollama, Keys.defaultModel)
process.exit(1)
}) // Try to connect to redis
await redis.connect()
// queue up bots name .then(() => console.log('[Redis] Connected'))
messageHistory.enqueue({ .catch((error) => {
role: 'assistant', console.error('[Redis] Connection Error', error)
content: `My name is ${client.user?.username}`, process.exit(1)
images: [] })
// Try to log in the client
await client.login(Keys.clientToken)
.catch((error) => {
console.error('[Login Error]', error)
process.exit(1)
})
// queue up bots name
messageHistory.enqueue({
role: 'assistant',
content: `My name is ${client.user?.username}`,
images: []
}) })
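The client startup hunk above is hard to read with the old and new columns interleaved, so here is a reconstruction of the new startup order it describes: create the Redis client, connect to Redis before logging in to Discord, and exit on failure. Environment lookups stand in for the repository's `Keys` module, and the queue/event wiring is omitted.

```typescript
import { Client, GatewayIntentBits } from 'discord.js'
import { Ollama } from 'ollama'
import { createClient } from 'redis'

// Stand-ins for the Keys module used in the diff above.
const redisHost = process.env.REDIS_IP ?? '127.0.0.1'
const redisPort = process.env.REDIS_PORT ?? '6379'

// Discord client with the same intents as the diff.
const client = new Client({
    intents: [
        GatewayIntentBits.Guilds,
        GatewayIntentBits.GuildMembers,
        GatewayIntentBits.GuildMessages,
        GatewayIntentBits.MessageContent
    ]
})

// Redis connection for preferences/history storage.
const redis = createClient({ url: `redis://${redisHost}:${redisPort}` })

// Ollama connection for chat generation.
export const ollama = new Ollama({
    host: `http://${process.env.OLLAMA_IP ?? '127.0.0.1'}:${process.env.OLLAMA_PORT ?? '11434'}`
})

// Fail fast if Redis is unreachable, then bring the Discord client online.
await redis.connect()
    .then(() => console.log('[Redis] Connected'))
    .catch((error) => {
        console.error('[Redis] Connection Error', error)
        process.exit(1)
    })

await client.login(process.env.CLIENT_TOKEN)
    .catch((error) => {
        console.error('[Login Error]', error)
        process.exit(1)
    })
```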

View File

@@ -1,15 +1,15 @@
import { Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js' import { Client, CommandInteraction, ApplicationCommandOptionType, MessageFlags } from 'discord.js'
import { openConfig, SlashCommand, UserCommand } from '../utils/index.js' import { openConfig, SlashCommand, UserCommand } from '../utils/index.js'
export const Capacity: SlashCommand = { export const Capacity: SlashCommand = {
name: 'modify-capacity', name: 'modify-capacity',
description: 'number of messages bot will hold for context.', description: 'maximum number of messages the bot will hold for context.',
// set available user options to pass to the command // set available user options to pass to the command
options: [ options: [
{ {
name: 'context-capacity', name: 'context-capacity',
description: 'a number to set capacity', description: 'number of allowed messages to remember',
type: ApplicationCommandOptionType.Number, type: ApplicationCommandOptionType.Number,
required: true required: true
} }
@@ -22,11 +22,13 @@ export const Capacity: SlashCommand = {
if (!channel || !UserCommand.includes(channel.type)) return if (!channel || !UserCommand.includes(channel.type)) return
// set state of bot chat features // set state of bot chat features
openConfig(`${interaction.user.username}-config.json`, interaction.commandName, interaction.options.get('context-capacity')?.value) openConfig(`${interaction.user.username}-config.json`, interaction.commandName,
interaction.options.get('context-capacity')?.value
)
interaction.reply({ interaction.reply({
content: `Message History Capacity has been set to \`${interaction.options.get('context-capacity')?.value}\``, content: `Max message history is now set to \`${interaction.options.get('context-capacity')?.value}\``,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
} }
} }
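This and several of the command diffs that follow make the same change: the deprecated `ephemeral: true` reply option is replaced with the `flags` field. A small sketch of that migration, not tied to any one command here:

```typescript
import { CommandInteraction, MessageFlags } from 'discord.js'

// Before (deprecated option):  interaction.reply({ content, ephemeral: true })
// After (flags-based form used throughout these updates):
async function replyPrivately(interaction: CommandInteraction, content: string) {
    await interaction.reply({ content, flags: MessageFlags.Ephemeral })
}
```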

View File

@@ -1,9 +1,9 @@
import { Channel, Client, CommandInteraction, TextChannel } from 'discord.js' import { Channel, Client, CommandInteraction, MessageFlags, TextChannel } from 'discord.js'
import { clearChannelInfo, SlashCommand, UserCommand } from '../utils/index.js' import { clearChannelInfo, SlashCommand, UserCommand } from '../utils/index.js'
export const ClearUserChannelHistory: SlashCommand = { export const ClearUserChannelHistory: SlashCommand = {
name: 'clear-user-channel-history', name: 'clear-user-channel-history',
description: 'clears history for user running this command in current channel', description: 'clears history for user in the current channel',
// Clear channel history for intended user // Clear channel history for intended user
run: async (client: Client, interaction: CommandInteraction) => { run: async (client: Client, interaction: CommandInteraction) => {
@@ -14,20 +14,22 @@ export const ClearUserChannelHistory: SlashCommand = {
if (!channel || !UserCommand.includes(channel.type)) return if (!channel || !UserCommand.includes(channel.type)) return
// clear channel info for user // clear channel info for user
const successfulWipe = await clearChannelInfo(interaction.channelId, const successfulWipe = await clearChannelInfo(
interaction.channel as TextChannel, interaction.channelId,
interaction.user.username) interaction.channel as TextChannel,
interaction.user.username
)
// check result of clearing history // check result of clearing history
if (successfulWipe) if (successfulWipe)
interaction.reply({ interaction.reply({
content: `Channel history in **this channel** successfully cleared for **${interaction.user.username}**.`, content: `History cleared in **this channel** for **${interaction.user.username}**.`,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
else else
interaction.reply({ interaction.reply({
content: `Channel history could not be found for **${interaction.user.username}** in **this channel**.\n\nPlease chat with **${client.user?.username}** to start a chat history.`, content: `History was not found for **${interaction.user.username}** in **this channel**.\n\nPlease chat with **${client.user?.username}** to start a chat history.`,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
} }
} }

View File

@@ -0,0 +1,60 @@
import { ApplicationCommandOptionType, Client, CommandInteraction, MessageFlags } from 'discord.js'
import { UserCommand, SlashCommand } from '../utils/index.js'
import { ollama } from '../client.js'
import { ModelResponse } from 'ollama'
export const DeleteModel: SlashCommand = {
name: 'delete-model',
description: 'deletes a model from the local list of models. Administrator Only.',
// set available user options to pass to the command
options: [
{
name: 'model-name',
description: 'the name of the model to delete',
type: ApplicationCommandOptionType.String,
required: true
}
],
// Delete Model locally stored
run: async (client: Client, interaction: CommandInteraction) => {
// defer reply to avoid timeout
await interaction.deferReply()
const modelInput: string = interaction.options.get('model-name')!!.value as string
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || !UserCommand.includes(channel.type)) return
// Admin Command
if (!interaction.memberPermissions?.has('Administrator')) {
interaction.reply({
content: `${interaction.commandName} is an admin command.\n\nPlease contact a server admin to delete the model you want.`,
flags: MessageFlags.Ephemeral
})
return
}
// check if model exists
const modelExists: boolean = await ollama.list()
.then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput)))
try {
// call ollama to delete model
if (modelExists) {
await ollama.delete({ model: modelInput })
interaction.editReply({
content: `**${modelInput}** was removed from the library.`
})
} else
throw new Error()
} catch (error) {
// could not delete the model
interaction.reply({
content: `Could not delete the **${modelInput}** model. It probably doesn't exist or you spelled it incorrectly.\n\nPlease try again if this is a mistake.`,
flags: MessageFlags.Ephemeral
})
}
}
}

View File

@@ -1,9 +1,9 @@
import { ChannelType, Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js' import { Client, CommandInteraction, ApplicationCommandOptionType, MessageFlags } from 'discord.js'
import { AdminCommand, openConfig, SlashCommand } from '../utils/index.js' import { AdminCommand, openConfig, SlashCommand } from '../utils/index.js'
export const Disable: SlashCommand = { export const Disable: SlashCommand = {
name: 'toggle-chat', name: 'toggle-chat',
description: 'toggle all chat features, Adminstrator Only.', description: 'toggle all chat features. Administrator Only.',
// set available user options to pass to the command // set available user options to pass to the command
options: [ options: [
@@ -24,18 +24,20 @@ export const Disable: SlashCommand = {
// check if runner is an admin // check if runner is an admin
if (!interaction.memberPermissions?.has('Administrator')) { if (!interaction.memberPermissions?.has('Administrator')) {
interaction.reply({ interaction.reply({
content: `${interaction.commandName} is an Administrator Command.\n\nYou, ${interaction.member?.user.username}, are not an Administrator in this server.\nPlease contact an admin to use this command.`, content: `${interaction.commandName} is an admin command.\n\nPlease contact an admin to use this command for you.`,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
return return
} }
// set state of bot chat features // set state of bot chat features
openConfig(`${interaction.guildId}-config.json`, interaction.commandName, interaction.options.get('enabled')?.value) openConfig(`${interaction.guildId}-config.json`, interaction.commandName,
interaction.options.get('enabled')?.value
)
interaction.reply({ interaction.reply({
content: `Chat features has been \`${interaction.options.get('enabled')?.value ? "enabled" : "disabled" }\``, content: `${client.user?.username} is now **${interaction.options.get('enabled')?.value ? "enabled" : "disabled"}**.`,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
} }
} }

View File

@@ -1,6 +1,5 @@
import { SlashCommand } from '../utils/commands.js' import { SlashCommand } from '../utils/commands.js'
import { ThreadCreate } from './threadCreate.js' import { ThreadCreate } from './threadCreate.js'
import { MessageStyle } from './messageStyle.js'
import { MessageStream } from './messageStream.js' import { MessageStream } from './messageStream.js'
import { Disable } from './disable.js' import { Disable } from './disable.js'
import { Shutoff } from './shutoff.js' import { Shutoff } from './shutoff.js'
@@ -9,16 +8,17 @@ import { PrivateThreadCreate } from './threadPrivateCreate.js'
import { ClearUserChannelHistory } from './cleanUserChannelHistory.js' import { ClearUserChannelHistory } from './cleanUserChannelHistory.js'
import { PullModel } from './pullModel.js' import { PullModel } from './pullModel.js'
import { SwitchModel } from './switchModel.js' import { SwitchModel } from './switchModel.js'
import { DeleteModel } from './deleteModel.js'
export default [ export default [
ThreadCreate, ThreadCreate,
PrivateThreadCreate, PrivateThreadCreate,
MessageStyle,
MessageStream, MessageStream,
Disable, Disable,
Shutoff, Shutoff,
Capacity, Capacity,
ClearUserChannelHistory, ClearUserChannelHistory,
PullModel, PullModel,
SwitchModel SwitchModel,
DeleteModel
] as SlashCommand[] ] as SlashCommand[]

View File

@@ -1,15 +1,15 @@
import { ApplicationCommandOptionType, Client, CommandInteraction } from 'discord.js' import { ApplicationCommandOptionType, Client, CommandInteraction, MessageFlags } from 'discord.js'
import { openConfig, SlashCommand, UserCommand } from '../utils/index.js' import { openConfig, SlashCommand, UserCommand } from '../utils/index.js'
export const MessageStream: SlashCommand = { export const MessageStream: SlashCommand = {
name: 'message-stream', name: 'message-stream',
description: 'change preference on message streaming from ollama. WARNING: can be very slow.', description: 'change preference on message streaming from ollama. WARNING: can be very slow due to Discord limits.',
// user option(s) for setting stream // user option(s) for setting stream
options: [ options: [
{ {
name: 'stream', name: 'stream',
description: 'enable or disable stream preference', description: 'enable or disable message streaming',
type: ApplicationCommandOptionType.Boolean, type: ApplicationCommandOptionType.Boolean,
required: true required: true
} }
@@ -22,11 +22,13 @@ export const MessageStream: SlashCommand = {
if (!channel || !UserCommand.includes(channel.type)) return if (!channel || !UserCommand.includes(channel.type)) return
// save value to json and write to it // save value to json and write to it
openConfig(`${interaction.user.username}-config.json`, interaction.commandName, interaction.options.get('stream')?.value) openConfig(`${interaction.user.username}-config.json`, interaction.commandName,
interaction.options.get('stream')?.value
)
interaction.reply({ interaction.reply({
content: `Message streaming preferences set to: \`${interaction.options.get('stream')?.value}\``, content: `Message streaming is now set to: \`${interaction.options.get('stream')?.value}\``,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
} }
} }

View File

@@ -1,32 +0,0 @@
import { Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js'
import { openConfig, SlashCommand, UserCommand } from '../utils/index.js'
export const MessageStyle: SlashCommand = {
name: 'message-style',
description: 'sets the message style to embed or normal',
// set available user options to pass to the command
options: [
{
name: 'embed',
description: 'toggle embedded or normal message',
type: ApplicationCommandOptionType.Boolean,
required: true
}
],
// Query for message information and set the style
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || !UserCommand.includes(channel.type)) return
// set the message style
openConfig(`${interaction.user.username}-config.json`, interaction.commandName, interaction.options.get('embed')?.value)
interaction.reply({
content: `Message style preferences for embed set to: \`${interaction.options.get('embed')?.value}\``,
ephemeral: true
})
}
}

View File

@@ -1,12 +1,11 @@
import { ApplicationCommandOptionType, Client, CommandInteraction } from "discord.js"; import { ApplicationCommandOptionType, Client, CommandInteraction, MessageFlags } from "discord.js"
import { SlashCommand } from "../utils/commands.js"; import { ollama } from "../client.js"
import { ollama } from "../client.js"; import { ModelResponse } from "ollama"
import { ModelResponse } from "ollama"; import { UserCommand, SlashCommand } from "../utils/index.js"
import { UserCommand } from "../utils/index.js";
export const PullModel: SlashCommand = { export const PullModel: SlashCommand = {
name: 'pull-model', name: 'pull-model',
description: 'pulls a model from the ollama model library', description: 'pulls a model from the ollama model library. Administrator Only.',
// set available user options to pass to the command // set available user options to pass to the command
options: [ options: [
@@ -28,18 +27,31 @@ export const PullModel: SlashCommand = {
const channel = await client.channels.fetch(interaction.channelId) const channel = await client.channels.fetch(interaction.channelId)
if (!channel || !UserCommand.includes(channel.type)) return if (!channel || !UserCommand.includes(channel.type)) return
// Admin Command
if (!interaction.memberPermissions?.has('Administrator')) {
interaction.reply({
content: `${interaction.commandName} is an admin command.\n\nPlease contact a server admin to pull the model you want.`,
flags: MessageFlags.Ephemeral
})
return
}
// check if model was already pulled // check if model was already pulled
const modelExists: boolean = await ollama.list() const modelExists: boolean = await ollama.list()
.then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput))) .then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput)))
try { try {
// call ollama to pull desired model // call ollama to pull desired model
if (!modelExists) if (!modelExists) {
interaction.editReply({
content: `**${modelInput}** could not be found. Please wait patiently as I try to retrieve it...`
})
await ollama.pull({ model: modelInput }) await ollama.pull({ model: modelInput })
}
} catch (error) { } catch (error) {
// could not resolve pull or model unfound // could not resolve pull or model unfound
interaction.editReply({ interaction.editReply({
content: `Could not pull/locate the **${modelInput}** model within the [Ollama Model Library](https://ollama.com/library).\n\nPlease check the model library and try again.` content: `Could not retrieve the **${modelInput}** model. You can find models at [Ollama Model Library](https://ollama.com/library).\n\nPlease check the model library and try again.`
}) })
return return
} }
@@ -47,11 +59,11 @@ export const PullModel: SlashCommand = {
// successful interaction // successful interaction
if (modelExists) if (modelExists)
interaction.editReply({ interaction.editReply({
content: `**${modelInput}** is already in your local model library.` content: `**${modelInput}** is already available.`
}) })
else else
interaction.editReply({ interaction.editReply({
content: `Successfully added **${modelInput}** into your local model library.` content: `Successfully added **${modelInput}**.`
}) })
} }
} }
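The `pull-model` changes above combine three things: a deferred reply (model pulls easily exceed Discord's three-second acknowledgement window), an Administrator gate, and progress updates via `editReply`. A simplified sketch of that flow follows; it is not the repository's exact implementation. Note that once `deferReply()` has been called, the interaction must be updated with `editReply` rather than a fresh `reply`.

```typescript
import { CommandInteraction } from 'discord.js'
import { Ollama } from 'ollama'

const ollama = new Ollama({ host: 'http://127.0.0.1:11434' }) // placeholder host

// Simplified defer/edit flow for a long-running model pull.
async function pullModelSketch(interaction: CommandInteraction, modelInput: string) {
    await interaction.deferReply()

    // Admin gate, as in the diff above.
    if (!interaction.memberPermissions?.has('Administrator')) {
        await interaction.editReply({ content: `${interaction.commandName} is an admin command.` })
        return
    }

    // Skip the pull if the model is already available locally.
    const alreadyPulled = await ollama.list()
        .then(response => response.models.some(model => model.name.startsWith(modelInput)))
    if (alreadyPulled) {
        await interaction.editReply({ content: `**${modelInput}** is already available.` })
        return
    }

    // Keep the user informed while the pull runs, then confirm.
    await interaction.editReply({ content: `Retrieving **${modelInput}**, this may take a while...` })
    await ollama.pull({ model: modelInput })
    await interaction.editReply({ content: `Successfully added **${modelInput}**.` })
}
```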

View File

@@ -1,21 +1,10 @@
import { Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js' import { Client, CommandInteraction, MessageFlags } from 'discord.js'
import { SlashCommand } from '../utils/commands.js' import { AdminCommand, SlashCommand } from '../utils/index.js'
import { AdminCommand } from '../utils/index.js'
export const Shutoff: SlashCommand = { export const Shutoff: SlashCommand = {
name: 'shutoff', name: 'shutoff',
description: 'shutdown the bot. You will need to manually bring it online again. Administrator Only.', description: 'shutdown the bot. You will need to manually bring it online again. Administrator Only.',
// set available user options to pass to the command
options: [
{
name: 'are-you-sure',
description: 'true = yes, false = I\'m scared',
type: ApplicationCommandOptionType.Boolean,
required: true
}
],
// Query for message information and set the style // Query for message information and set the style
run: async (client: Client, interaction: CommandInteraction) => { run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message // fetch channel and message
@@ -23,29 +12,25 @@ export const Shutoff: SlashCommand = {
if (!channel || !AdminCommand.includes(channel.type)) return if (!channel || !AdminCommand.includes(channel.type)) return
// log this, this will probably be important for logging who did this // log this, this will probably be important for logging who did this
console.log(`User -> ${interaction.user.tag} attempting to shutdown ${client.user!!.tag}`) console.log(`[Command: shutoff] User ${interaction.user.tag} attempting to shutdown ${client.user!!.tag}`)
// check if admin or false on shutdown // check if admin or false on shutdown
if (!interaction.memberPermissions?.has('Administrator')) { if (!interaction.memberPermissions?.has('Administrator')) {
interaction.reply({ interaction.reply({
content: `**Shutdown Aborted:**\n\n${interaction.user.tag}, You do not have permission to shutoff **${client.user?.tag}**.`, content: `**Shutdown Aborted:**\n\n${interaction.user.tag}, You do not have permission to shutoff **${client.user?.tag}**.`,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
return // stop from shutting down return // stop from shutting down
} else if (!interaction.options.get('are-you-sure')?.value) {
interaction.reply({
content: `**Shutdown Aborted:**\n\n${interaction.user.tag}, You didn't want to shutoff **${client.user?.tag}**.`,
ephemeral: true
})
return // chickened out
} }
// Shutoff cleared, do it // Shutoff cleared, do it
interaction.reply({ interaction.reply({
content: `${client.user?.tag} is ${interaction.options.get('are-you-sure')?.value ? "shutting down now." : "not shutting down." }`, content: `${client.user?.tag} is shutting down.`,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
console.log(`[Command: shutoff] ${client.user?.tag} is shutting down.`)
// clean up client instance and stop // clean up client instance and stop
client.destroy() client.destroy()
} }

View File

@@ -1,12 +1,11 @@
import { ApplicationCommandOptionType, Client, CommandInteraction } from "discord.js"; import { ApplicationCommandOptionType, Client, CommandInteraction } from "discord.js"
import { SlashCommand } from "../utils/commands.js"; import { ollama } from "../client.js"
import { ollama } from "../client.js"; import { ModelResponse } from "ollama"
import { ModelResponse } from "ollama"; import { openConfig, UserCommand, SlashCommand } from "../utils/index.js"
import { openConfig, UserCommand } from "../utils/index.js";
export const SwitchModel: SlashCommand = { export const SwitchModel: SlashCommand = {
name: 'switch-model', name: 'switch-model',
description: 'switches current model to preferred model to use.', description: 'switches the preferred model to use.',
// set available user options to pass to the command // set available user options to pass to the command
options: [ options: [
@@ -29,46 +28,38 @@ export const SwitchModel: SlashCommand = {
if (!channel || !UserCommand.includes(channel.type)) return if (!channel || !UserCommand.includes(channel.type)) return
try { try {
// Phase 1: Set the model // Phase 1: Switch to the model
let switchSuccess = false let switchSuccess = false
await ollama.list() await ollama.list()
.then(response => { .then(response => {
for (const model in response.models) { for (const model in response.models) {
const currentModel: ModelResponse = response.models[model] const currentModel: ModelResponse = response.models[model]
if (currentModel.name.startsWith(modelInput)) { if (currentModel.name.startsWith(modelInput)) {
openConfig(`${interaction.user.username}-config.json`, interaction.commandName, modelInput) openConfig(`${interaction.user.username}-config.json`, interaction.commandName, modelInput)
// successful switch // successful switch
interaction.editReply({ interaction.editReply({
content: `Successfully switched to **${modelInput}** as the preferred model for ${interaction.user.username}.` content: `Successfully switched to **${modelInput}** as the preferred model for ${interaction.user.username}.`
}) })
switchSuccess = true switchSuccess = true
}
} }
} })
})
// todo: problem can be here if async messes up // todo: problem can be here if async messes up
if (switchSuccess) return if (switchSuccess) {
// set model now that it exists
openConfig(`${interaction.user.username}-config.json`, interaction.commandName, modelInput)
return
}
// Phase 2: Try to get it regardless // Phase 2: Notify user of failure to find model.
interaction.editReply({ interaction.editReply({
content: `Could not find **${modelInput}** in local model library, trying to pull it now...\n\nThis could take a few moments... Please be patient!` content: `Could not find **${modelInput}** in local model library.\n\nPlease contact a server admin for access to this model.`
})
await ollama.pull({
model: modelInput
})
// set model now that it exists
openConfig(`${interaction.user.username}-config.json`, interaction.commandName, modelInput)
// We got the model!
interaction.editReply({
content: `Successfully added and set **${modelInput}** as your preferred model.`
}) })
} catch (error) { } catch (error) {
// could not resolve user model switch // could not resolve user model switch
interaction.editReply({ interaction.editReply({
content: `Unable to switch user preferred model to **${modelInput}**.\n\n${error}\n\nPossible solution is to run \`/pull-model ${modelInput}\` and try again.` content: `Unable to switch user preferred model to **${modelInput}**.\n\n${error}\n\nPossible solution is to ask a server admin to run \`/pull-model ${modelInput}\` and try again.`
}) })
return return
} }

View File

@@ -1,4 +1,4 @@
import { ChannelType, Client, CommandInteraction, TextChannel, ThreadChannel } from 'discord.js' import { ChannelType, Client, CommandInteraction, MessageFlags, TextChannel, ThreadChannel } from 'discord.js'
import { AdminCommand, openChannelInfo, SlashCommand } from '../utils/index.js' import { AdminCommand, openChannelInfo, SlashCommand } from '../utils/index.js'
export const ThreadCreate: SlashCommand = { export const ThreadCreate: SlashCommand = {
@@ -18,17 +18,15 @@ export const ThreadCreate: SlashCommand = {
}) })
// Send a message in the thread // Send a message in the thread
thread.send(`Hello ${interaction.user} and others! \n\nIt's nice to meet you. Please talk to me by typing **@${client.user?.username}** with your prompt.`) thread.send(`Hello ${interaction.user} and others! \n\nIt's nice to meet you. Please talk to me by typing **@${client.user?.username}** with your message.`)
// handle storing this chat channel // handle storing this chat channel
openChannelInfo(thread.id, openChannelInfo(thread.id, thread as ThreadChannel, interaction.user.tag)
thread as ThreadChannel,
interaction.user.tag)
// user only reply // user only reply
return interaction.reply({ return interaction.reply({
content: `I can help you in thread **${thread.id}** below.`, content: `I can help you in <#${thread.id}> below.`,
ephemeral: true flags: MessageFlags.Ephemeral
}) })
} }
} }

View File

@@ -1,4 +1,4 @@
-import { ChannelType, Client, CommandInteraction, TextChannel, ThreadChannel } from 'discord.js'
+import { ChannelType, Client, CommandInteraction, MessageFlags, TextChannel, ThreadChannel } from 'discord.js'
 import { AdminCommand, openChannelInfo, SlashCommand } from '../utils/index.js'

 export const PrivateThreadCreate: SlashCommand = {
@@ -22,15 +22,12 @@ export const PrivateThreadCreate: SlashCommand = {
        // handle storing this chat channel
        // store: thread.id, thread.name
-       openChannelInfo(thread.id,
-           thread as ThreadChannel,
-           interaction.user.tag
-       )
+       openChannelInfo(thread.id, thread as ThreadChannel, interaction.user.tag)

        // user only reply
        return interaction.reply({
-           content: `I can help you in thread **${thread.id}**. Please refer to the private channel below this one.`,
-           ephemeral: true
+           content: `I can help you in <#${thread.id}>.`,
+           flags: MessageFlags.Ephemeral
        })
    }
}
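Both thread commands above replace the deprecated ephemeral reply field with the flags option from discord.js v14. A minimal sketch of the new reply shape, assuming a generic slash command handler (the helper name and reply text are placeholders, not from the repository):

import { CommandInteraction, MessageFlags } from 'discord.js'

// Hypothetical helper: reply so only the invoking user sees the message.
async function replyPrivately(interaction: CommandInteraction, content: string) {
    // `ephemeral: true` is deprecated; MessageFlags.Ephemeral is the v14 replacement.
    return interaction.reply({ content, flags: MessageFlags.Ephemeral })
}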

View File

@@ -7,7 +7,7 @@ import commands from '../commands/index.js'
 */
export default event(Events.InteractionCreate, async ({ log, client }, interaction) => {
    if (!interaction.isCommand() || !interaction.isChatInputCommand()) return

    log(`Interaction called \'${interaction.commandName}\' from ${interaction.user.tag}.`)

    // ensure command exists, otherwise kill event

View File

@@ -1,6 +1,9 @@
 import { TextChannel } from 'discord.js'
-import { embedMessage, event, Events, normalMessage, UserMessage, clean } from '../utils/index.js'
-import { getChannelInfo, getServerConfig, getUserConfig, openChannelInfo, openConfig, UserConfig, getAttachmentData } from '../utils/index.js'
+import { event, Events, normalMessage, UserMessage, clean } from '../utils/index.js'
+import {
+    getChannelInfo, getServerConfig, getUserConfig, openChannelInfo,
+    openConfig, UserConfig, getAttachmentData, getTextFileAttachmentData
+} from '../utils/index.js'

 /**
  * Max Message length for free users is 2000 characters (bot or not).
@@ -8,9 +11,9 @@ import { getChannelInfo, getServerConfig, getUserConfig, openChannelInfo, openCo
  *
  * @param message the message received from the channel
  */
-export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client }, message) => {
+export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client, defaultModel }, message) => {
    const clientId = client.user!!.id
-   const cleanedMessage = clean(message.content, clientId)
+   let cleanedMessage = clean(message.content, clientId)
    log(`Message \"${cleanedMessage}\" from ${message.author.tag} in channel/thread ${message.channelId}.`)

    // Do not respond if bot talks in the chat
@@ -21,57 +24,88 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
    // default stream to false
    let shouldStream = false

+   // Params for Preferences Fetching
+   const maxRetries = 3
+   const delay = 1000 // in millisecons
+
    try {
        // Retrieve Server/Guild Preferences
-       await new Promise((resolve, reject) => {
-           getServerConfig(`${message.guildId}-config.json`, (config) => {
-               // check if config.json exists
-               if (config === undefined) {
-                   // Allowing chat options to be available
-                   openConfig(`${message.guildId}-config.json`, 'toggle-chat', true)
-                   reject(new Error('No Server Preferences is set up.\n\nCreating default server preferences file...\nPlease try chatting again.'))
-                   return
-               }
-
-               // check if chat is disabled
-               if (!config.options['toggle-chat']) {
-                   reject(new Error('Admin(s) have disabled chat features.\n\n Please contact your server\'s admin(s).'))
-                   return
-               }
-
-               resolve(config)
-           })
-       })
-
-       // Retrieve User Preferences
-       const userConfig: UserConfig = await new Promise((resolve, reject) => {
-           getUserConfig(`${message.author.username}-config.json`, (config) => {
-               if (config === undefined) {
-                   openConfig(`${message.author.username}-config.json`, 'message-style', false)
-                   reject(new Error('No User Preferences is set up.\n\nCreating preferences file with \`message-style\` set as \`false\` for regular message style.\nPlease try chatting again.'))
-                   return
-               }
-
-               // check if there is a set capacity in config
-               if (typeof config.options['modify-capacity'] !== 'number')
-                   log(`Capacity is undefined, using default capacity of ${msgHist.capacity}.`)
-               else if (config.options['modify-capacity'] === msgHist.capacity)
-                   log(`Capacity matches config as ${msgHist.capacity}, no changes made.`)
-               else {
-                   log(`New Capacity found. Setting Context Capacity to ${config.options['modify-capacity']}.`)
-                   msgHist.capacity = config.options['modify-capacity']
-               }
-
-               // set stream state
-               shouldStream = config.options['message-stream'] as boolean || false
-
-               if (typeof config.options['switch-model'] !== 'string')
-                   reject(new Error(`No Model was set. Please set a model by running \`/switch-model <model of choice>\`.\n\nIf you do not have any models. Run \`/pull-model <model name>\`.`))
-
-               resolve(config)
-           })
-       })
+       let attempt = 0
+       while (attempt < maxRetries) {
+           try {
+               await new Promise((resolve, reject) => {
+                   getServerConfig(`${message.guildId}-config.json`, (config) => {
+                       // check if config.json exists
+                       if (config === undefined) {
+                           // Allowing chat options to be available
+                           openConfig(`${message.guildId}-config.json`, 'toggle-chat', true)
+                           reject(new Error('Failed to locate or create Server Preferences\n\nPlease try chatting again...'))
+                       }
+                       // check if chat is disabled
+                       else if (!config.options['toggle-chat'])
+                           reject(new Error('Admin(s) have disabled chat features.\n\n Please contact your server\'s admin(s).'))
+                       else
+                           resolve(config)
+                   })
+               })
+               break // successful
+           } catch (error) {
+               ++attempt
+               if (attempt < maxRetries) {
+                   log(`Attempt ${attempt} failed for Server Preferences. Retrying in ${delay}ms...`)
+                   await new Promise(ret => setTimeout(ret, delay))
+               } else
+                   throw new Error(`Could not retrieve Server Preferences, please try chatting again...`)
+           }
+       }
+
+       // Reset attempts for User preferences
+       attempt = 0
+       let userConfig: UserConfig | undefined
+       while (attempt < maxRetries) {
+           try {
+               // Retrieve User Preferences
+               userConfig = await new Promise((resolve, reject) => {
+                   getUserConfig(`${message.author.username}-config.json`, (config) => {
+                       if (config === undefined) {
+                           openConfig(`${message.author.username}-config.json`, 'message-style', false)
+                           openConfig(`${message.author.username}-config.json`, 'switch-model', defaultModel)
+                           reject(new Error('No User Preferences is set up.\n\nCreating preferences file with \`message-style\` set as \`false\` for regular message style.\nPlease try chatting again.'))
+                           return
+                       }
+
+                       // check if there is a set capacity in config
+                       else if (typeof config.options['modify-capacity'] !== 'number')
+                           log(`Capacity is undefined, using default capacity of ${msgHist.capacity}.`)
+                       else if (config.options['modify-capacity'] === msgHist.capacity)
+                           log(`Capacity matches config as ${msgHist.capacity}, no changes made.`)
+                       else {
+                           log(`New Capacity found. Setting Context Capacity to ${config.options['modify-capacity']}.`)
+                           msgHist.capacity = config.options['modify-capacity']
+                       }
+
+                       // set stream state
+                       shouldStream = config.options['message-stream'] as boolean || false
+
+                       if (typeof config.options['switch-model'] !== 'string')
+                           reject(new Error(`No Model was set. Please set a model by running \`/switch-model <model of choice>\`.\n\nIf you do not have any models. Run \`/pull-model <model name>\`.`))
+
+                       resolve(config)
+                   })
+               })
+               break // successful
+           } catch (error) {
+               ++attempt
+               if (attempt < maxRetries) {
+                   log(`Attempt ${attempt} failed for User Preferences. Retrying in ${delay}ms...`)
+                   await new Promise(ret => setTimeout(ret, delay))
+               } else
+                   throw new Error(`Could not retrieve User Preferences, please try chatting again...`)
+           }
+       }

    // need new check for "open/active" threads/channels here!
    let chatMessages: UserMessage[] = await new Promise((resolve) => {
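The retry loops added above follow a simple fixed-delay pattern: attempt the read, break on success, sleep and retry on failure, and give up after maxRetries. A standalone sketch of the same idea (the withRetries name and generic signature are illustrative, not part of the repository):

// Illustrative fixed-delay retry wrapper; mirrors the maxRetries/delay loops in the hunk above.
async function withRetries<T>(task: () => Promise<T>, maxRetries = 3, delay = 1000): Promise<T> {
    let attempt = 0
    while (true) {
        try {
            return await task()             // success: hand the value back
        } catch (error) {
            ++attempt
            if (attempt >= maxRetries)
                throw error                 // exhausted: surface the last failure
            await new Promise(resolve => setTimeout(resolve, delay))
        }
    }
}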
@@ -88,7 +122,7 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
    if (chatMessages.length === 0) {
        chatMessages = await new Promise((resolve, reject) => {
            openChannelInfo(message.channelId,
                message.channel as TextChannel,
                message.author.tag
            )
@@ -103,11 +137,18 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
        })
    }

-   // response string for ollama to put its response
-   let response: string
+   if (!userConfig)
+       throw new Error(`Failed to initialize User Preference for **${message.author.username}**.\n\nIt's likely you do not have a model set. Please use the \`switch-model\` command to do that.`)

    // get message attachment if exists
-   const messageAttachment: string[] = await getAttachmentData(message.attachments.first())
+   const attachment = message.attachments.first()
+   let messageAttachment: string[] = []
+   if (attachment && attachment.name?.endsWith(".txt"))
+       cleanedMessage += await getTextFileAttachmentData(attachment)
+   else if (attachment)
+       messageAttachment = await getAttachmentData(attachment)

    const model: string = userConfig.options['switch-model']

    // set up new queue
@@ -122,12 +163,9 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
        content: cleanedMessage,
        images: messageAttachment || []
    })

-   // undefined or false, use normal, otherwise use embed
-   if (userConfig.options['message-style'])
-       response = await embedMessage(message, ollama, model, msgHist, shouldStream)
-   else
-       response = await normalMessage(message, ollama, model, msgHist, shouldStream)
+   // response string for ollama to put its response
+   const response: string = await normalMessage(message, ollama, model, msgHist, shouldStream)

    // If something bad happened, remove user query and stop
    if (response == undefined) { msgHist.pop(); return }
@@ -136,16 +174,16 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
    while (msgHist.size() >= msgHist.capacity) msgHist.dequeue()

    // successful query, save it in context history
    msgHist.enqueue({
        role: 'assistant',
        content: response,
        images: messageAttachment || []
    })

    // only update the json on success
    openChannelInfo(message.channelId,
        message.channel as TextChannel,
        message.author.tag,
        msgHist.getItems()
    )
} catch (error: any) {
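The attachment handling above now branches on file type: a .txt attachment is fetched as text and appended to the prompt, while anything else is passed along as base64 image data. A rough sketch of that branch in isolation, assuming the two helpers from the utils diff further down (the splitAttachment wrapper itself is illustrative):

import { Message } from 'discord.js'
import { getAttachmentData, getTextFileAttachmentData } from '../utils/index.js'

// Sketch: split an incoming attachment into extra prompt text vs. image data.
async function splitAttachment(message: Message, prompt: string) {
    const attachment = message.attachments.first()
    let images: string[] = []

    if (attachment && attachment.name?.endsWith('.txt'))
        prompt += await getTextFileAttachmentData(attachment)   // text files become extra prompt text
    else if (attachment)
        images = await getAttachmentData(attachment)            // everything else is treated as an image

    return { prompt, images }
}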

View File

@@ -20,8 +20,8 @@ export default event(Events.ThreadDelete, async ({ log }, thread: ThreadChannel)
        // filter files by thread id being deleted
        const filesToDiscard = files.filter(
            file => file.startsWith(`${thread.id}-`) &&
                file.endsWith('.json'))

        // remove files by unlinking
        filesToDiscard.forEach(file => {
@@ -36,5 +36,5 @@ export default event(Events.ThreadDelete, async ({ log }, thread: ThreadChannel)
        })
    } catch (error) {
        log(`Issue deleting user history files from ${thread.id}`)
    }
})

View File

@@ -1,9 +1,12 @@
 import { getEnvVar } from './utils/index.js'

 export const Keys = {
    clientToken: getEnvVar('CLIENT_TOKEN'),
    ipAddress: getEnvVar('OLLAMA_IP', '127.0.0.1'), // default ollama ip if none
    portAddress: getEnvVar('OLLAMA_PORT', '11434'), // default ollama port if none
-} as const // readonly keys
+   defaultModel: getEnvVar('MODEL', 'llama3.2'),
+   redisHost: getEnvVar('REDIS_IP', '172.18.0.4'), // default redis host if none
+   redisPort: parseInt(getEnvVar('REDIS_PORT', '6379')) // default redis port if none
+} as const // readonly keys

 export default Keys
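With the new entries, Keys resolves every external dependency from the environment with a fallback: the Ollama host and port, the default model, and the Redis host and port. A hypothetical wiring sketch showing how these values might be consumed (the client construction here is illustrative, not a quote of the repository's setup code):

import { Ollama } from 'ollama'
import Keys from './keys.js'

// Illustrative only: assemble the Ollama endpoint from the Keys object.
const ollama = new Ollama({ host: `http://${Keys.ipAddress}:${Keys.portAddress}` })

// MODEL falls back to 'llama3.2' when unset, mirroring the new .env entry in this compare.
console.log(`default model: ${Keys.defaultModel}, redis at ${Keys.redisHost}:${Keys.redisPort}`)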

View File

@@ -17,7 +17,7 @@ export class Queue<T> implements IQueue<T> {
     * Set up Queue
     * @param capacity max length of queue
     */
-   constructor(public capacity: number = 5) {}
+   constructor(public capacity: number = 5) { }

    /**
     * Put item in front of queue

View File

@@ -3,7 +3,6 @@ import { UserMessage } from './index.js'

 export interface UserConfiguration {
    'message-stream'?: boolean,
-   'message-style'?: boolean,
    'modify-capacity': number,
    'switch-model': string
 }

View File

@@ -21,14 +21,13 @@ export function getEnvVar(name: string, fallback?: string): string {
        throw new Error(`Environment variable ${name} is not set.`)

    // validate User-Generated Discord Application Tokens
-   if (name === "CLIENT_TOKEN")
-       if (value.length < 72) throw new Error(`The "CLIENT_TOKEN" provided is not of at least length 72.
+   if (name === "CLIENT_TOKEN" && value.length > 72)
+       throw new Error(`The "CLIENT_TOKEN" provided is not of at least length 72.
            This is probably an invalid token unless Discord updated their token policy. Please provide a valid token.`)

    // validate IPv4 address found in environment variables
-   if (name.endsWith("_IP") || name.endsWith("_ADDRESS"))
-       if (!ipValidate.test(value))
-           throw new Error(`Environment variable ${name} does not follow IPv4 formatting.`)
+   if ((name.endsWith("_IP") || name.endsWith("_ADDRESS")) && !ipValidate.test(value))
+       throw new Error(`Environment variable ${name} does not follow IPv4 formatting.`)

    // return env variable
    return value
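The validation above collapses the nested ifs into single guards: the token length check only runs for CLIENT_TOKEN, and the IPv4 regex only runs for names ending in _IP or _ADDRESS. Usage is unchanged; a couple of hedged examples of the expected behaviour:

import { getEnvVar } from './utils/index.js'

// Falls back when the variable is unset (assuming OLLAMA_PORT is not defined in the environment).
const port = getEnvVar('OLLAMA_PORT', '11434')

// Throws if the value is set but is not a dotted IPv4 address,
// e.g. OLLAMA_IP=localhost would fail the ipValidate regex.
const ip = getEnvVar('OLLAMA_IP', '127.0.0.1')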

View File

@@ -15,8 +15,8 @@ export type EventKeys = keyof ClientEvents // only wants keys of ClientEvents ob
 * @param msgHist message history
 */
export type ChatParams = {
    model: string,
    ollama: Ollama,
    msgHist: UserMessage[]
}
@@ -24,6 +24,7 @@ export type ChatParams = {
 * Format for the messages to be stored when communicating when the bot
 * @param role either assistant, user, or system
 * @param content string of the message the user or assistant provided
+ * @param images array of images that the user or assistant provided
 */
export type UserMessage = {
    role: string,
@@ -33,11 +34,18 @@ export type UserMessage = {

// Event properties
export interface EventProps {
-   client: Client
-   log: LogMethod
-   msgHist: Queue<UserMessage>
-   ollama: Ollama
+   client: Client,
+   log: LogMethod,
+   msgHist: Queue<UserMessage>,
+   ollama: Ollama,
+   defaultModel: String
}

+/**
+ * Format for the callback function tied to an event
+ * @param props the properties of the event
+ * @param args the arguments of the event
+ */
export type EventCallback<T extends EventKeys> = (
    props: EventProps,
    ...args: ClientEvents[T]
@@ -49,6 +57,12 @@ export interface Event<T extends EventKeys = EventKeys> {
    callback: EventCallback<T>
}

+/**
+ * Method to create an event object
+ * @param key type of event
+ * @param callback function to run when event is triggered
+ * @returns event object
+ */
export function event<T extends EventKeys>(key: T, callback: EventCallback<T>): Event<T> {
    return { key, callback }
}
@@ -61,10 +75,11 @@ export function event<T extends EventKeys>(key: T, callback: EventCallback<T>):
 * @param ollama the initialized ollama instance
 */
export function registerEvents(
    client: Client,
    events: Event[],
    msgHist: Queue<UserMessage>,
-   ollama: Ollama
+   ollama: Ollama,
+   defaultModel: String
): void {
    for (const { key, callback } of events) {
        client.on(key, (...args) => {
@@ -73,7 +88,7 @@ export function registerEvents(
            // Handle Errors, call callback, log errors as needed
            try {
-               callback({ client, log, msgHist, ollama }, ...args)
+               callback({ client, log, msgHist, ollama, defaultModel }, ...args)
            } catch (error) {
                log('[Uncaught Error]', error)
            }
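Since EventProps and registerEvents now carry defaultModel, the boot code has to thread the configured model through when events are wired up. A hedged sketch of what that call site might look like (the events import, intents list, and client construction are placeholders, not quotes of the repository's bootstrap):

import { Client, GatewayIntentBits } from 'discord.js'
import { Ollama } from 'ollama'
import Keys from './keys.js'
import events from './events/index.js'          // placeholder: an array of Event objects
import { Queue } from './queues/queue.js'
import { registerEvents, UserMessage } from './utils/index.js'

// Placeholder bootstrap: real intents and options live elsewhere in the repo.
const client = new Client({ intents: [GatewayIntentBits.Guilds, GatewayIntentBits.GuildMessages] })
const ollama = new Ollama({ host: `http://${Keys.ipAddress}:${Keys.portAddress}` })

// defaultModel is now the fifth argument and ends up on EventProps for every callback.
registerEvents(client, events, new Queue<UserMessage>(), ollama, Keys.defaultModel)
client.login(Keys.clientToken)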

View File

@@ -54,4 +54,13 @@ export async function getAttachmentData(attachment: Attachment | undefined): Pro
    const buffer = await getAttachmentBuffer(url)
    const base64String = arrayBufferToBase64(buffer)
    return [base64String]
+}
+
+/**
+ * Method to retrieve the string data from the text file
+ *
+ * @param attachment the text file to convert to a string
+ */
+export async function getTextFileAttachmentData(attachment: Attachment): Promise<string> {
+    return await (await fetch(attachment.url)).text()
}

View File

@@ -80,7 +80,14 @@ export async function openChannelInfo(filename: string, channel: TextChannel | T
            }
        })
    } else { // file doesn't exist, create it
-       const object: Configuration = JSON.parse(`{ \"id\": \"${channel?.id}\", \"name\": \"${channel?.name}\", \"user\": \"${user}\", \"messages\": []}`)
+       const object: Configuration = JSON.parse(
+           `{
+               \"id\": \"${channel?.id}\",
+               \"name\": \"${channel?.name}\",
+               \"user\": \"${user}\",
+               \"messages\": []
+           }`
+       )

        const directory = path.dirname(fullFileName)
        if (!fs.existsSync(directory))
@@ -103,7 +110,7 @@ export async function getChannelInfo(filename: string, callback: (config: Channe
    if (fs.existsSync(fullFileName)) {
        fs.readFile(fullFileName, 'utf8', (error, data) => {
            if (error) {
                callback(undefined)
                return // something went wrong... stop
            }

            callback(JSON.parse(data))
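The reformatted template literal above still produces the same channel-history file; for a thread it comes out roughly like the object below (all values are placeholders):

// Approximate shape of a freshly created data/<threadId>-config.json file.
const exampleChannelFile = {
    id: '123456789012345678',   // thread/channel id
    name: 'my-help-thread',     // thread/channel name
    user: 'someone#0000',       // tag of the user who opened it
    messages: []                // chat history appended on success
}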

View File

@@ -12,7 +12,7 @@ import path from 'path'
 // add type of change (server, user)
 export function openConfig(filename: string, key: string, value: any) {
    const fullFileName = `data/${filename}`

    // check if the file exists, if not then make the config file
    if (fs.existsSync(fullFileName)) {
        fs.readFile(fullFileName, 'utf8', (error, data) => {
@@ -58,7 +58,7 @@ export async function getServerConfig(filename: string, callback: (config: Serve
    if (fs.existsSync(fullFileName)) {
        fs.readFile(fullFileName, 'utf8', (error, data) => {
            if (error) {
                callback(undefined)
                return // something went wrong... stop
            }

            callback(JSON.parse(data))
@@ -81,7 +81,7 @@ export async function getUserConfig(filename: string, callback: (config: UserCon
    if (fs.existsSync(fullFileName)) {
        fs.readFile(fullFileName, 'utf8', (error, data) => {
            if (error) {
                callback(undefined)
                return // something went wrong... stop
            }

            callback(JSON.parse(data))
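These helpers are the read side of the same data/ folder that openConfig writes to: reads are callback-based and hand back undefined when the file is missing or unreadable. A hedged usage sketch, pairing the two the way the message event does (filename and model are placeholders):

import { getUserConfig, openConfig } from './index.js'

// Sketch: read a user's preferences, seeding a default model if no file exists yet.
getUserConfig('someone-config.json', (config) => {
    if (config === undefined) {
        openConfig('someone-config.json', 'switch-model', 'llama3.2')   // placeholder filename/model
        return
    }
    console.log(config.options['switch-model'])
})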

View File

@@ -5,7 +5,7 @@ import { AbortableAsyncIterator } from "ollama/src/utils.js"
 /**
  * Method to query the Ollama client for async generation
  * @param params
- * @returns Asyn
+ * @returns AsyncIterator<ChatResponse> generated by the Ollama client
  */
 export async function streamResponse(params: ChatParams): Promise<AbortableAsyncIterator<ChatResponse>> {
    return await params.ollama.chat({

View File

@@ -1,7 +1,6 @@
 // Centralized import index
 export * from './env.js'
 export * from './events.js'
-export * from './messageEmbed.js'
 export * from './messageNormal.js'
 export * from './commands.js'
 export * from './configInterfaces.js'

View File

@@ -1,5 +1,3 @@
-import Keys from "../keys.js"
-
 /**
  * Clean up the bot user_id so it only has the prompt
  *

View File

@@ -1,129 +0,0 @@
import { EmbedBuilder, Message, SendableChannels } from 'discord.js'
import { ChatResponse, Ollama } from 'ollama'
import { ChatParams, UserMessage, streamResponse, blockResponse } from './index.js'
import { Queue } from '../queues/queue.js'
import { AbortableAsyncIterator } from 'ollama/src/utils.js'
/**
* Method to send replies as normal text on discord like any other user
* @param message message sent by the user
* @param model name of model to run query
* @param msgHist message history between user and model
*/
export async function embedMessage(
message: Message,
ollama: Ollama,
model: string,
msgHist: Queue<UserMessage>,
stream: boolean
): Promise<string> {
// bot response
let response: ChatResponse | AbortableAsyncIterator<ChatResponse>
let result: string = ''
// initial message to client
const botMessage = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription('Generating Response . . .')
.setColor('#00FF00')
// send the message
const channel = message.channel as SendableChannels
const sentMessage = await channel.send({ embeds: [botMessage] })
// create params
const params: ChatParams = {
model: model,
ollama: ollama,
msgHist: msgHist.getItems()
}
try {
// check if embed needs to stream
if (stream) {
response = await streamResponse(params)
for await (const portion of response) {
result += portion.message.content
// exceeds handled length
if (result.length > 5000) {
const errorEmbed = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription(`Response length ${result.length} has exceeded Discord maximum.\n\nLong Stream messages not supported.`)
.setColor('#00FF00')
// send error
channel.send({ embeds: [errorEmbed] })
break // cancel loop and stop
}
// new embed per token...
const streamEmbed = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription(result || 'No Content Yet...')
.setColor('#00FF00')
// edit the message
sentMessage.edit({ embeds: [streamEmbed] })
}
} else {
response = await blockResponse(params)
result = response.message.content
// long message, split into different embeds sadly.
if (result.length > 5000) {
const firstEmbed = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription(result.slice(0, 5000) || 'No Content to Provide...')
.setColor('#00FF00')
// replace first embed
sentMessage.edit({ embeds: [firstEmbed] })
// take the rest out
result = result.slice(5000)
// handle the rest
while (result.length > 5000) {
const whileEmbed = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription(result.slice(0, 5000) || 'No Content to Provide...')
.setColor('#00FF00')
channel.send({ embeds: [whileEmbed] })
result = result.slice(5000)
}
const lastEmbed = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription(result || 'No Content to Provide...')
.setColor('#00FF00')
// rest of the response
channel.send({ embeds: [lastEmbed] })
} else {
// only need to create 1 embed, handles 6000 characters
const newEmbed = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription(result || 'No Content to Provide...')
.setColor('#00FF00')
// edit the message
sentMessage.edit({ embeds: [newEmbed] })
}
}
} catch(error: any) {
console.log(`[Util: messageEmbed] Error creating message: ${error.message}`)
const errorEmbed = new EmbedBuilder()
.setTitle(`Responding to ${message.author.tag}`)
.setDescription(`**Response generation failed.**\n\nReason: ${error.message}`)
.setColor('#00FF00')
// send back error
sentMessage.edit({ embeds: [errorEmbed] })
}
// Hope there is a response! undefined otherwie
return result
}

View File

@@ -28,7 +28,7 @@ export async function normalMessage(
        model: model,
        ollama: ollama,
        msgHist: msgHist.getItems()
    }

    // run query based on stream preference, true = stream, false = block
    if (stream) {
@@ -40,14 +40,15 @@ export async function normalMessage(
                    result = portion.message.content

                    // new message block, wait for it to send and assign new block to respond.
-                   await channel.send("Creating new stream block...").then(sentMessage => { messageBlock = sentMessage })
+                   await channel.send("Creating new stream block...")
+                       .then(sentMessage => { messageBlock = sentMessage })
                } else {
                    result += portion.message.content

                    // ensure block is not empty
                    if (result.length > 5)
                        messageBlock.edit(result)
                }
                console.log(result)
            }
        }
@@ -70,10 +71,13 @@ export async function normalMessage(
                    channel.send(result)
                } else // edit the 'generic' response to new message since <2000
                    sentMessage.edit(result)
            }
-       } catch(error: any) {
+       } catch (error: any) {
            console.log(`[Util: messageNormal] Error creating message: ${error.message}`)
-           sentMessage.edit(`**Response generation failed.**\n\nReason: ${error.message}`)
+           if (error.message.includes('try pulling it first'))
+               sentMessage.edit(`**Response generation failed.**\n\nReason: You do not have the ${model} downloaded. Ask an admin to pull it using the \`pull-model\` command.`)
+           else
+               sentMessage.edit(`**Response generation failed.**\n\nReason: ${error.message}`)
        }
    })
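The new catch branch special-cases the error Ollama raises when the requested model has never been pulled; everything else still surfaces the raw message. A hedged illustration of the raw failure that the string check matches (model name and exact wording are only indicative of Ollama's "not found, try pulling it first" error):

import { Ollama } from 'ollama'

// Illustration only: trigger the error path that the string check above rewrites.
const ollama = new Ollama({ host: 'http://127.0.0.1:11434' })

try {
    await ollama.chat({ model: 'model-that-was-never-pulled', messages: [{ role: 'user', content: 'hi' }] })
} catch (error: any) {
    // e.g. "model 'model-that-was-never-pulled' not found, try pulling it first"
    console.log(error.message)
}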

View File

@@ -1,72 +1,85 @@
 // describe marks a test suite
 // expect takes a value from an expression
 // it marks a test case
-import { describe, expect, it } from 'vitest'
+import { describe, expect, it, vi } from 'vitest'
 import commands from '../src/commands/index.js'

+/**
+ * Mocking redis found in client.ts because of the commands
+ */
+vi.mock('../src/client.js', () => ({
+    redis: {
+        createClient: vi.fn(),
+        connect: vi.fn(),
+        get: vi.fn(),
+        set: vi.fn()
+    }
+}))
+
 /**
  * Commands test suite, tests the commands object
  * Each command is to be tested elsewhere, this file
  * is to ensure that the commands object is defined.
  *
  * @param name name of the test suite
  * @param fn function holding tests to run
  */
 describe('Commands Existence', () => {
    // test definition of commands object
    it('references defined object', () => {
        // toBe compares the value to the expected value
        expect(typeof commands).toBe('object')
    })

    // test specific commands in the object
    it('references specific commands', () => {
        const commandsString = commands.map(e => e.name).join(', ')
-       expect(commandsString).toBe('thread, private-thread, message-style, message-stream, toggle-chat, shutoff, modify-capacity, clear-user-channel-history, pull-model, switch-model')
+       const expectedCommands = ['thread', 'private-thread', 'message-stream', 'toggle-chat', 'shutoff', 'modify-capacity', 'clear-user-channel-history', 'pull-model', 'switch-model', 'delete-model']
+       expect(commandsString).toBe(expectedCommands.join(', '))
    })
 })

 /**
  * User Commands Test suite for testing out commands
  * that would be run by users when using the application.
  */
 describe('User Command Tests', () => {
    // test capacity command
    it('run modify-capacity command', () => {

    })

    it('run clear-user-channel-history command', () => {

    })

    it('run message-stream command', () => {

    })

    it('run message-style command', () => {

    })

    it('run thread command', () => {

    })

    it('run private-thread command', () => {

    })
 })

 /**
  * Admin Commands Test suite for running administrative
  * commands with the application.
  */
 describe('Admin Command Tests', () => {
    it('run shutoff command', () => {

    })

    it('run toggle-chat command', () => {

    })
 })
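The reworked suite above still leaves the per-command cases as stubs; filling one in only needs the commands array that is already imported. A hedged example for the new delete-model command (the assertion style is a guess at how the stubs might be completed, not repository code):

// Example: assert the new admin command is registered before testing its behaviour.
it('includes the delete-model command', () => {
    const deleteModel = commands.find(command => command.name === 'delete-model')
    expect(deleteModel).toBeDefined()
})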

View File

@@ -8,7 +8,7 @@ import { Queue } from '../src/queues/queue.js'
 * @param fn function holding tests to run
 */
describe('Queue Structure', () => {
-   let queue= new Queue<string>()
+   let queue = new Queue<string>()

    // test for queue creation
    it('creates a new queue', () => {