1 Commit

Author: snyk-bot
SHA1: 69dd68bb5e
Date: 2025-05-29 09:09:32 +00:00

fix: upgrade discord.js from 14.18.0 to 14.19.3

Snyk has created this PR to upgrade discord.js from 14.18.0 to 14.19.3.

See this package in npm:
discord.js

See this project in Snyk:
https://app.snyk.io/org/jt2m0l3y/project/d8b070a3-e4a3-457a-977b-7eb6a4a48346?utm_source=github&utm_medium=referral&page=upgrade-pr
30 changed files with 273 additions and 180 deletions


@@ -13,3 +13,7 @@ DISCORD_IP = IP_ADDRESS
# subnet address, ex. 172.18.0.0 as we use /16.
SUBNET_ADDRESS = ADDRESS
# redis port and ip, default redis port is 6379
REDIS_IP = IP_ADDRESS
REDIS_PORT = PORT


@@ -34,6 +34,8 @@ jobs:
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
# set -e ensures if nohup fails, this section fails
- name: Startup Discord Bot Client
@@ -61,6 +63,8 @@ jobs:
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Setup Docker Network and Images
run: |
@@ -68,8 +72,8 @@ jobs:
- name: Check Images Exist
run: |
(docker images | grep -q 'kevinthedang/discord-ollama' && docker images | grep -qE 'ollama/ollama') || exit 1
(docker images | grep -q 'kevinthedang/discord-ollama' && docker images | grep -qE 'ollama/ollama' && docker images | grep -qE 'redis') || exit 1
- name: Check Containers Exist
run: |
(docker ps | grep -q 'ollama' && docker ps | grep -q 'discord') || exit 1
(docker ps | grep -q 'ollama' && docker ps | grep -q 'discord' && docker ps | grep -q 'redis') || exit 1


@@ -31,10 +31,12 @@ jobs:
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Collect Code Coverage
run: |
LINE_PCT=$(npm run coverage | tail -2 | head -1 | awk '{print $3}')
LINE_PCT=$(npm run test:coverage | tail -2 | head -1 | awk '{print $3}')
echo "COVERAGE=$LINE_PCT" >> $GITHUB_ENV
- name: Upload Code Coverage


@@ -24,6 +24,8 @@ jobs:
echo MODEL = ${{ secrets.MODEL }} >> .env
echo DISCORD_IP = ${{ secrets.DISCORD_IP }} >> .env
echo SUBNET_ADDRESS = ${{ secrets.SUBNET_ADDRESS }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Check if directory exists and delete it
run: |
@@ -57,6 +59,7 @@ jobs:
npm install
IMAGE="kevinthedang/discord-ollama"
REDIS="redis"
OLLAMA="ollama/ollama"
if docker images | grep -q $IMAGE; then
@@ -72,6 +75,19 @@ jobs:
echo "Old $IMAGE Image Removed"
fi
if docker images | grep -q $REDIS; then
IMAGE_ID=$(docker images -q $REDIS)
CONTAINER_IDS=$(docker ps -q --filter "ancestor=$IMAGE_ID")
if [ ! -z "$CONTAINER_IDS" ]; then
# Stop and remove the running containers
docker stop $CONTAINER_IDS
echo "Stopped and removed the containers using the image $REDIS"
fi
docker rmi $IMAGE_ID
echo "Old $REDIS Image Removed"
fi
if docker images | grep -q $OLLAMA; then
IMAGE_ID=$(docker images -q $OLLAMA)
CONTAINER_IDS=$(docker ps -q --filter "ancestor=$IMAGE_ID")
@@ -101,6 +117,14 @@ jobs:
--ip ${{ secrets.OLLAMA_IP }} \
ollama/ollama:latest
docker run --rm -d \
-v redis:/root/.redis \
-p ${{ secrets.REDIS_PORT }}:${{ secrets.REDIS_PORT }} \
--name redis \
--network ollama-net \
--ip ${{ secrets.REDIS_IP }} \
redis:latest
docker run --rm -d \
-v discord:/src/app \
--name discord \


@@ -42,7 +42,9 @@ jobs:
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
- name: Test Application
run: |
npm run tests
npm run test:run


@@ -14,28 +14,19 @@
Ollama is an AI model management tool that allows users to install and use custom large language models locally.
The project aims to:
* [x] Create a Discord bot that utilizes Ollama to chat with users!
* [x] User and Server Preferences
* [x] Message Persistence
* [x] User Preferences on Chat
* [x] Message Persistence on Channels and Threads
* [x] Threads
* [x] Channels
* [x] Containerization with Docker
* [x] Slash Commands Compatible
* [ ] Summary Command
* [ ] Model Info Command
* [ ] List Models Command
* [x] Pull Model Command
* [x] Switch Model Command
* [x] Delete Model Command
* [x] Create Thread Command
* [x] Create Private Thread Command
* [x] Message Stream Command
* [x] Change Message History Size Command
* [x] Clear Channel History Command (User Only)
* [x] Administrator Role Compatible
* [x] Generated Token Length Handling for >2000
* [x] Token Length Handling of any message size
* [x] Multi-User Chat Generation - built into Ollama as of `v0.2.1`
* [ ] Ollama Tool Support Implementation
* [ ] Enhanced Channel Context Awareness
* [ ] Improved User Replied Triggers
* [x] User vs. Server Preferences
* [ ] Redis Caching
* [x] Administrator Role Compatible
* [x] Multi-User Chat Generation (multiple users chatting at the same time) - built into Ollama as of `v0.2.1`
* [x] Automatic and Manual model pulling through the Discord client
Further, Ollama can run custom models or layer additional context on top of any model available through the Ollama model library.
* [Customize a model](https://github.com/ollama/ollama#customize-a-model)
@@ -63,6 +54,8 @@ These are guides to the features and capabilities of this app.
* This project requires the use of npm version `10.9.0` or above.
* [Ollama](https://ollama.com/)
* [Ollama Docker Image](https://hub.docker.com/r/ollama/ollama)
* [Redis](https://redis.io/)
* [Redis Docker Image](https://hub.docker.com/_/redis)
* [Discord.js Docs](https://discord.js.org/docs/packages/discord.js/main)
* [Setting up Docker (Ubuntu 20.04)](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04)
* [Setting up Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)


@@ -7,12 +7,14 @@ services:
build: ./ # find docker file in designated path
container_name: discord
restart: always # always restart the container
image: kevinthedang/discord-ollama:0.8.6
image: kevinthedang/discord-ollama:0.8.4
environment:
CLIENT_TOKEN: ${CLIENT_TOKEN}
OLLAMA_IP: ${OLLAMA_IP}
OLLAMA_PORT: ${OLLAMA_PORT}
MODEL: ${MODEL}
REDIS_IP: ${REDIS_IP}
REDIS_PORT: ${REDIS_PORT}
networks:
ollama-net:
ipv4_address: ${DISCORD_IP}
@@ -35,6 +37,19 @@ services:
ports:
- ${OLLAMA_PORT}:${OLLAMA_PORT}
# set up redis container
redis:
image: redis:latest
container_name: redis
restart: always
networks:
ollama-net:
ipv4_address: ${REDIS_IP}
volumes:
- redis:/root/.redis
ports:
- ${REDIS_PORT}:${REDIS_PORT}
# create a network that supports giving addresses within a specific subnet
networks:
ollama-net:
@@ -47,3 +62,4 @@ networks:
volumes:
ollama:
discord:
redis:


@@ -43,11 +43,13 @@ sudo systemctl restart docker
* [GitHub repository](https://github.com/NVIDIA/nvidia-container-toolkit?tab=readme-ov-file) for Nvidia Container Toolkit
## To Run (with Docker and Docker Compose)
* With the inclusion of subnets in the `docker-compose.yml`, you will need to set the `SUBNET_ADDRESS`, `OLLAMA_IP`, `OLLAMA_PORT`, and `DISCORD_IP`. Here are some default values if you don't care:
* With the inclusion of subnets in the `docker-compose.yml`, you will need to set the `SUBNET_ADDRESS`, `OLLAMA_IP`, `OLLAMA_PORT`, `REDIS_IP`, `REDIS_PORT`, and `DISCORD_IP`. Here are some default values if you don't care:
* `SUBNET_ADDRESS = 172.18.0.0`
* `OLLAMA_IP = 172.18.0.2`
* `OLLAMA_PORT = 11434`
* `DISCORD_IP = 172.18.0.3`
* `REDIS_IP = 172.18.0.4`
* `REDIS_PORT = 6379`
* Don't understand any of this? Watch a networking video to learn about subnetting.
* You also need all environment variables shown in [`.env.sample`](../.env.sample)
* Otherwise, there is no need to install any npm packages for this; just run `npm run start` to pull the containers and spin them up.
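
For reference, a minimal `.env` built from the defaults above might look like the following sketch (`CLIENT_TOKEN` is a placeholder for your own bot token, and `MODEL` can be any model name Ollama can pull; `llama3.2` is the fallback the code itself uses):

CLIENT_TOKEN = YOUR_DISCORD_BOT_TOKEN
MODEL = llama3.2
OLLAMA_IP = 172.18.0.2
OLLAMA_PORT = 11434
DISCORD_IP = 172.18.0.3
SUBNET_ADDRESS = 172.18.0.0
REDIS_IP = 172.18.0.4
REDIS_PORT = 6379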

package-lock.json (generated, 143 lines changed)

@@ -1,17 +1,18 @@
{
"name": "discord-ollama",
"version": "0.8.5",
"version": "0.8.4",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "discord-ollama",
"version": "0.8.5",
"version": "0.8.4",
"license": "ISC",
"dependencies": {
"discord.js": "^14.20.0",
"discord.js": "^14.19.3",
"dotenv": "^16.5.0",
"ollama": "^0.5.15"
"ollama": "^0.5.15",
"redis": "^4.7.0"
},
"devDependencies": {
"@types/node": "^22.13.14",
@@ -170,9 +171,9 @@
}
},
"node_modules/@discordjs/rest": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/@discordjs/rest/-/rest-2.5.1.tgz",
"integrity": "sha512-Tg9840IneBcbrAjcGaQzHUJWFNq1MMWZjTdjJ0WS/89IffaNKc++iOvffucPxQTF/gviO9+9r8kEPea1X5J2Dw==",
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/@discordjs/rest/-/rest-2.5.0.tgz",
"integrity": "sha512-PWhchxTzpn9EV3vvPRpwS0EE2rNYB9pvzDU/eLLW3mByJl0ZHZjHI2/wA8EbH2gRMQV7nu+0FoDF84oiPl8VAQ==",
"license": "Apache-2.0",
"dependencies": {
"@discordjs/collection": "^2.1.1",
@@ -183,7 +184,7 @@
"discord-api-types": "^0.38.1",
"magic-bytes.js": "^1.10.0",
"tslib": "^2.6.3",
"undici": "6.21.3"
"undici": "6.21.1"
},
"engines": {
"node": ">=18"
@@ -217,13 +218,13 @@
}
},
"node_modules/@discordjs/ws": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@discordjs/ws/-/ws-1.2.3.tgz",
"integrity": "sha512-wPlQDxEmlDg5IxhJPuxXr3Vy9AjYq5xCvFWGJyD7w7Np8ZGu+Mc+97LCoEc/+AYCo2IDpKioiH0/c/mj5ZR9Uw==",
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@discordjs/ws/-/ws-1.2.2.tgz",
"integrity": "sha512-dyfq7yn0wO0IYeYOs3z79I6/HumhmKISzFL0Z+007zQJMtAFGtt3AEoq1nuLXtcunUE5YYYQqgKvybXukAK8/w==",
"license": "Apache-2.0",
"dependencies": {
"@discordjs/collection": "^2.1.0",
"@discordjs/rest": "^2.5.1",
"@discordjs/rest": "^2.5.0",
"@discordjs/util": "^1.1.0",
"@sapphire/async-queue": "^1.5.2",
"@types/ws": "^8.5.10",
@@ -768,6 +769,65 @@
"node": ">=14"
}
},
"node_modules/@redis/bloom": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.2.0.tgz",
"integrity": "sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==",
"license": "MIT",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/client": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/@redis/client/-/client-1.6.0.tgz",
"integrity": "sha512-aR0uffYI700OEEH4gYnitAnv3vzVGXCFvYfdpu/CJKvk4pHfLPEy/JSZyrpQ+15WhXe1yJRXLtfQ84s4mEXnPg==",
"license": "MIT",
"dependencies": {
"cluster-key-slot": "1.1.2",
"generic-pool": "3.9.0",
"yallist": "4.0.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@redis/graph": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.1.tgz",
"integrity": "sha512-FEMTcTHZozZciLRl6GiiIB4zGm5z5F3F6a6FZCyrfxdKOhFlGkiAqlexWMBzCi4DcRoyiOsuLfW+cjlGWyExOw==",
"license": "MIT",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/json": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.7.tgz",
"integrity": "sha512-6UyXfjVaTBTJtKNG4/9Z8PSpKE6XgSyEb8iwaqDcy+uKrd/DGYHTWkUdnQDyzm727V7p21WUMhsqz5oy65kPcQ==",
"license": "MIT",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/search": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@redis/search/-/search-1.2.0.tgz",
"integrity": "sha512-tYoDBbtqOVigEDMAcTGsRlMycIIjwMCgD8eR2t0NANeQmgK/lvxNAvYyb6bZDD4frHRhIHkJu2TBRvB0ERkOmw==",
"license": "MIT",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@redis/time-series": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.1.0.tgz",
"integrity": "sha512-c1Q99M5ljsIuc4YdaCwfUEXsofakb9c8+Zse2qxTadu8TalLXuAESzLvFAvNVbkmSlvlzIQOLpBCmWI9wTOt+g==",
"license": "MIT",
"peerDependencies": {
"@redis/client": "^1.0.0"
}
},
"node_modules/@rollup/rollup-android-arm-eabi": {
"version": "4.37.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.37.0.tgz",
@@ -1413,6 +1473,15 @@
"node": ">= 16"
}
},
"node_modules/cluster-key-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz",
"integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==",
"license": "Apache-2.0",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
@@ -1503,24 +1572,24 @@
]
},
"node_modules/discord.js": {
"version": "14.20.0",
"resolved": "https://registry.npmjs.org/discord.js/-/discord.js-14.20.0.tgz",
"integrity": "sha512-5fRTptK2vpuz+bTuAEUQLSo/3AgCSLHl6Mm9+/ofb+8cbbnjWllhtaqRBq7XcpzlBnfNEugKv8HvCwcOtIHpCg==",
"version": "14.19.3",
"resolved": "https://registry.npmjs.org/discord.js/-/discord.js-14.19.3.tgz",
"integrity": "sha512-lncTRk0k+8Q5D3nThnODBR8fR8x2fM798o8Vsr40Krx0DjPwpZCuxxTcFMrXMQVOqM1QB9wqWgaXPg3TbmlHqA==",
"license": "Apache-2.0",
"dependencies": {
"@discordjs/builders": "^1.11.2",
"@discordjs/collection": "1.5.3",
"@discordjs/formatters": "^0.6.1",
"@discordjs/rest": "^2.5.1",
"@discordjs/rest": "^2.5.0",
"@discordjs/util": "^1.1.1",
"@discordjs/ws": "^1.2.3",
"@discordjs/ws": "^1.2.2",
"@sapphire/snowflake": "3.5.3",
"discord-api-types": "^0.38.1",
"fast-deep-equal": "3.1.3",
"lodash.snakecase": "4.1.1",
"magic-bytes.js": "^1.10.0",
"tslib": "^2.6.3",
"undici": "6.21.3"
"undici": "6.21.1"
},
"engines": {
"node": ">=18"
@@ -1661,6 +1730,15 @@
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/generic-pool": {
"version": "3.9.0",
"resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz",
"integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==",
"license": "MIT",
"engines": {
"node": ">= 4"
}
},
"node_modules/get-tsconfig": {
"version": "4.10.0",
"resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.10.0.tgz",
@@ -2024,6 +2102,23 @@
"node": "^10 || ^12 || >=14"
}
},
"node_modules/redis": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-4.7.0.tgz",
"integrity": "sha512-zvmkHEAdGMn+hMRXuMBtu4Vo5P6rHQjLoHftu+lBqq8ZTA3RCVC/WzD790bkKKiNFp7d5/9PcSD19fJyyRvOdQ==",
"license": "MIT",
"workspaces": [
"./packages/*"
],
"dependencies": {
"@redis/bloom": "1.2.0",
"@redis/client": "1.6.0",
"@redis/graph": "1.1.1",
"@redis/json": "1.0.7",
"@redis/search": "1.2.0",
"@redis/time-series": "1.1.0"
}
},
"node_modules/resolve-pkg-maps": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
@@ -2428,9 +2523,9 @@
}
},
"node_modules/undici": {
"version": "6.21.3",
"resolved": "https://registry.npmjs.org/undici/-/undici-6.21.3.tgz",
"integrity": "sha512-gBLkYIlEnSp8pFbT64yFgGE6UIB9tAkhukC23PmMDCe5Nd+cRqKxSjw5y54MK2AZMgZfJWMaNE4nYUHgi1XEOw==",
"version": "6.21.1",
"resolved": "https://registry.npmjs.org/undici/-/undici-6.21.1.tgz",
"integrity": "sha512-q/1rj5D0/zayJB2FraXdaWxbhWiNKDvu8naDT2dl1yTlvJp4BLtOcp2a5BvgGNQpYYJzau7tf1WgKv3b+7mqpQ==",
"license": "MIT",
"engines": {
"node": ">=18.17"
@@ -2772,6 +2867,12 @@
}
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"license": "ISC"
},
"node_modules/yn": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz",


@@ -1,34 +1,36 @@
{
"name": "discord-ollama",
"version": "0.8.6",
"version": "0.8.4",
"description": "Ollama Integration into discord",
"main": "build/index.js",
"exports": "./build/index.js",
"scripts": {
"tests": "vitest run",
"coverage": "vitest run --coverage",
"test:run": "vitest run",
"test:coverage": "vitest run --coverage",
"watch": "tsx watch src",
"build": "tsc",
"prod": "node .",
"client": "npm run build && npm run prod",
"clean": "docker compose down && docker rmi $(docker images | grep kevinthedang | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"start": "docker compose build --no-cache && docker compose up -d",
"docker:clean": "docker rm -f discord && docker rm -f ollama && docker network prune -f && docker rmi $(docker images | grep kevinthedang | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"docker:clean": "docker rm -f discord && docker rm -f ollama && docker rm -f redis && docker network prune -f && docker rmi $(docker images | grep kevinthedang | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"docker:network": "docker network create --subnet=172.18.0.0/16 ollama-net",
"docker:build": "docker build --no-cache -t kevinthedang/discord-ollama:$(node -p \"require('./package.json').version\") .",
"docker:build-latest": "docker build --no-cache -t kevinthedang/discord-ollama:latest .",
"docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 kevinthedang/discord-ollama:$(node -p \"require('./package.json').version\")",
"docker:redis": "docker run -d -v redis:/root/.redis -p 6379:6379 --name redis --network ollama-net --ip 172.18.0.4 redis:latest",
"docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest",
"docker:ollama-cpu": "docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest",
"docker:start": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama",
"docker:start-cpu": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama-cpu"
"docker:start": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:redis && npm run docker:client && npm run docker:ollama",
"docker:start-cpu": "docker network prune -f && npm run docker:network && npm run docker:build && npm run docker:redis && npm run docker:client && npm run docker:ollama-cpu"
},
"author": "Kevin Dang",
"license": "ISC",
"dependencies": {
"discord.js": "^14.20.0",
"discord.js": "^14.19.3",
"dotenv": "^16.5.0",
"ollama": "^0.5.15"
"ollama": "^0.5.15",
"redis": "^4.7.0"
},
"devDependencies": {
"@types/node": "^22.13.14",
@@ -43,4 +45,4 @@
"npm": ">=10.9.0",
"node": ">=22.12.0"
}
}
}


@@ -1,6 +1,7 @@
import { Client, GatewayIntentBits } from 'discord.js'
import { Ollama } from 'ollama'
import { Queue } from './components/index.js'
import { createClient } from 'redis'
import { Queue } from './queues/queue.js'
import { UserMessage, registerEvents } from './utils/index.js'
import Events from './events/index.js'
import Keys from './keys.js'
@@ -15,6 +16,11 @@ const client = new Client({
]
})
// initialize connection to redis
const redis = createClient({
url: `redis://${Keys.redisHost}:${Keys.redisPort}`,
})
// initialize connection to ollama container
export const ollama = new Ollama({
host: `http://${Keys.ipAddress}:${Keys.portAddress}`,
@@ -26,6 +32,14 @@ const messageHistory: Queue<UserMessage> = new Queue<UserMessage>
// register all events
registerEvents(client, Events, messageHistory, ollama, Keys.defaultModel)
// Try to connect to redis
await redis.connect()
.then(() => console.log('[Redis] Connected'))
.catch((error) => {
console.error('[Redis] Connection Error', error)
process.exit(1)
})
// Try to log in the client
await client.login(Keys.clientToken)
.catch((error) => {
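
The README lists Redis caching as a planned feature; a sketch of how the connected `redis` client above could back such a cache might look like this (hypothetical helpers assuming node-redis v4; `cacheHistory` and `readHistory` are illustrative names, not part of this codebase):

// hypothetical caching helpers built on the redis client created above
// cache a user's message history under a per-user key that expires after an hour
async function cacheHistory(user: string, messages: UserMessage[]): Promise<void> {
    await redis.set(`history:${user}`, JSON.stringify(messages), { EX: 3600 })
}

// read the history back, returning undefined on a cache miss
async function readHistory(user: string): Promise<UserMessage[] | undefined> {
    const raw = await redis.get(`history:${user}`)
    return raw ? JSON.parse(raw) as UserMessage[] : undefined
}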


@@ -1,4 +1,4 @@
import { Client, ChatInputCommandInteraction, ApplicationCommandOptionType, MessageFlags } from 'discord.js'
import { Client, CommandInteraction, ApplicationCommandOptionType, MessageFlags } from 'discord.js'
import { openConfig, SlashCommand, UserCommand } from '../utils/index.js'
export const Capacity: SlashCommand = {
@@ -16,14 +16,14 @@ export const Capacity: SlashCommand = {
],
// Query for message information and set the style
run: async (client: Client, interaction: ChatInputCommandInteraction) => {
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || !UserCommand.includes(channel.type)) return
// set state of bot chat features
openConfig(`${interaction.user.username}-config.json`, interaction.commandName,
interaction.options.getNumber('context-capacity')
interaction.options.get('context-capacity')?.value
)
interaction.reply({


@@ -1,4 +1,4 @@
import { ApplicationCommandOptionType, ChatInputCommandInteraction, Client, CommandInteraction, MessageFlags } from 'discord.js'
import { ApplicationCommandOptionType, Client, CommandInteraction, MessageFlags } from 'discord.js'
import { UserCommand, SlashCommand } from '../utils/index.js'
import { ollama } from '../client.js'
import { ModelResponse } from 'ollama'
@@ -18,11 +18,10 @@ export const DeleteModel: SlashCommand = {
],
// Delete Model locally stored
run: async (client: Client, interaction: ChatInputCommandInteraction) => {
run: async (client: Client, interaction: CommandInteraction) => {
// defer reply to avoid timeout
await interaction.deferReply()
const modelInput: string = interaction.options.getString('model-name') as string
let ollamaOffline: boolean = false
const modelInput: string = interaction.options.get('model-name')!.value as string
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
@@ -38,22 +37,9 @@ export const DeleteModel: SlashCommand = {
}
// check if model exists
const modelExists = await ollama.list()
const modelExists: boolean = await ollama.list()
.then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput)))
.catch(error => {
ollamaOffline = true
console.error(`[Command: delete-model] Failed to connect with Ollama service. Error: ${error.message}`)
})
// Validate for any issue or if service is running
if (ollamaOffline) {
interaction.editReply({
content: `The Ollama service is not running. Please turn on/download the [service](https://ollama.com/).`
})
return
}
try {
// call ollama to delete model
if (modelExists) {


@@ -1,4 +1,4 @@
import { Client, ChatInputCommandInteraction, ApplicationCommandOptionType, MessageFlags } from 'discord.js'
import { Client, CommandInteraction, ApplicationCommandOptionType, MessageFlags } from 'discord.js'
import { AdminCommand, openConfig, SlashCommand } from '../utils/index.js'
export const Disable: SlashCommand = {
@@ -16,7 +16,7 @@ export const Disable: SlashCommand = {
],
// Query for message information and set the style
run: async (client: Client, interaction: ChatInputCommandInteraction) => {
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || !AdminCommand.includes(channel.type)) return
@@ -32,11 +32,11 @@ export const Disable: SlashCommand = {
// set state of bot chat features
openConfig(`${interaction.guildId}-config.json`, interaction.commandName,
interaction.options.getBoolean('enabled')
interaction.options.get('enabled')?.value
)
interaction.reply({
content: `${client.user?.username} is now **${interaction.options.getBoolean('enabled') ? "enabled" : "disabled"}**.`,
content: `${client.user?.username} is now **${interaction.options.get('enabled')?.value ? "enabled" : "disabled"}**.`,
flags: MessageFlags.Ephemeral
})
}


@@ -1,4 +1,4 @@
import { ApplicationCommandOptionType, Client, ChatInputCommandInteraction, MessageFlags } from 'discord.js'
import { ApplicationCommandOptionType, Client, CommandInteraction, MessageFlags } from 'discord.js'
import { openConfig, SlashCommand, UserCommand } from '../utils/index.js'
export const MessageStream: SlashCommand = {
@@ -16,18 +16,18 @@ export const MessageStream: SlashCommand = {
],
// change preferences based on command
run: async (client: Client, interaction: ChatInputCommandInteraction) => {
run: async (client: Client, interaction: CommandInteraction) => {
// verify channel
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || !UserCommand.includes(channel.type)) return
// save value to json and write to it
openConfig(`${interaction.user.username}-config.json`, interaction.commandName,
interaction.options.getBoolean('stream')
interaction.options.get('stream')?.value
)
interaction.reply({
content: `Message streaming is now set to: \`${interaction.options.getBoolean('stream')}\``,
content: `Message streaming is now set to: \`${interaction.options.get('stream')?.value}\``,
flags: MessageFlags.Ephemeral
})
}


@@ -1,4 +1,4 @@
import { ApplicationCommandOptionType, Client, ChatInputCommandInteraction, MessageFlags } from "discord.js"
import { ApplicationCommandOptionType, Client, CommandInteraction, MessageFlags } from "discord.js"
import { ollama } from "../client.js"
import { ModelResponse } from "ollama"
import { UserCommand, SlashCommand } from "../utils/index.js"
@@ -18,11 +18,10 @@ export const PullModel: SlashCommand = {
],
// Pull for model from Ollama library
run: async (client: Client, interaction: ChatInputCommandInteraction) => {
run: async (client: Client, interaction: CommandInteraction) => {
// defer reply to avoid timeout
await interaction.deferReply()
const modelInput: string = interaction.options.getString('model-to-pull') as string
let ollamaOffline: boolean = false
const modelInput: string = interaction.options.get('model-to-pull')!.value as string
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
@@ -37,22 +36,9 @@ export const PullModel: SlashCommand = {
return
}
// check if model was already pulled, if the ollama service isn't running throw error
const modelExists = await ollama.list()
// check if model was already pulled
const modelExists: boolean = await ollama.list()
.then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput)))
.catch(error => {
ollamaOffline = true
console.error(`[Command: pull-model] Failed to connect with Ollama service. Error: ${error.message}`)
})
// Validate for any issue or if service is running
if (ollamaOffline) {
interaction.editReply({
content: `The Ollama service is not running. Please turn on/download the [service](https://ollama.com/).`
})
return
}
try {
// call ollama to pull desired model


@@ -1,4 +1,4 @@
import { ApplicationCommandOptionType, Client, ChatInputCommandInteraction } from "discord.js"
import { ApplicationCommandOptionType, Client, CommandInteraction } from "discord.js"
import { ollama } from "../client.js"
import { ModelResponse } from "ollama"
import { openConfig, UserCommand, SlashCommand } from "../utils/index.js"
@@ -18,10 +18,10 @@ export const SwitchModel: SlashCommand = {
],
// Switch user preferred model if available in local library
run: async (client: Client, interaction: ChatInputCommandInteraction) => {
run: async (client: Client, interaction: CommandInteraction) => {
await interaction.deferReply()
const modelInput: string = interaction.options.getString('model-to-use') as string
const modelInput: string = interaction.options.get('model-to-use')!.value as string
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
@@ -45,9 +45,6 @@ export const SwitchModel: SlashCommand = {
}
}
})
.catch(error => {
console.error(`[Command: switch-model] Failed to connect with Ollama service. Error: ${error.message}`)
})
// todo: problem can be here if async messes up
if (switchSuccess) {
// set model now that it exists
@@ -59,13 +56,10 @@ export const SwitchModel: SlashCommand = {
interaction.editReply({
content: `Could not find **${modelInput}** in local model library.\n\nPlease contact a server admin for access to this model.`
})
} catch (error: any) {
} catch (error) {
// could not resolve user model switch
if (error.message.includes("fetch failed") as string)
error.message = "The Ollama service is not running. Please turn on/download the [service](https://ollama.com/)."
interaction.editReply({
content: `Unable to switch user preferred model to **${modelInput}**.\n\n${error.message}`
content: `Unable to switch user preferred model to **${modelInput}**.\n\n${error}\n\nA possible solution is to ask a server admin to run \`/pull-model ${modelInput}\` and try again.`
})
return
}


@@ -1,46 +0,0 @@
/**
* @class Logger
* @description A class to handle logging messages
* @method log
*/
export class Logger {
private logPrefix: string = ''
private type: string = 'log'
private constructPrefix(component?: string, method?: string): string {
let prefix = this.type.toUpperCase()
if (component) {
prefix += ` [${component}`
if (method) prefix += `: ${method}`
prefix += ']'
}
return prefix
}
public bind(component?: string, method?: string): CallableFunction {
let tempPrefix = this.constructPrefix(component, method)
if (tempPrefix !== this.logPrefix) this.logPrefix = tempPrefix
switch (this.type) {
case 'warn':
return console.warn.bind(console, this.logPrefix)
case 'error':
return console.error.bind(console, this.logPrefix)
case 'log':
default:
return console.log.bind(console, this.logPrefix)
}
}
public log(type: string, message: unknown, component?: string, method?: string): void {
if (type && type !== this.type) this.type = type
let log = this.bind(component, method)
log(message)
}
}


@@ -1,2 +0,0 @@
export * from './queue.js'
export * from './binder.js'


@@ -1,6 +1,6 @@
import { TextChannel } from 'discord.js'
import { event, Events, normalMessage, UserMessage, clean } from '../utils/index.js'
import {
event, Events, normalMessage, UserMessage, clean,
getChannelInfo, getServerConfig, getUserConfig, openChannelInfo,
openConfig, UserConfig, getAttachmentData, getTextFileAttachmentData
} from '../utils/index.js'
@@ -71,8 +71,9 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
userConfig = await new Promise((resolve, reject) => {
getUserConfig(`${message.author.username}-config.json`, (config) => {
if (config === undefined) {
openConfig(`${message.author.username}-config.json`, 'message-style', false)
openConfig(`${message.author.username}-config.json`, 'switch-model', defaultModel)
reject(new Error(`No user preferences are set up.\n\nCreating a new preferences file for ${message.author.username}.\nPlease try chatting again.`))
reject(new Error('No user preferences are set up.\n\nCreating a preferences file with \`message-style\` set to \`false\` for the regular message style.\nPlease try chatting again.'))
return
}


@@ -4,7 +4,9 @@ export const Keys = {
clientToken: getEnvVar('CLIENT_TOKEN'),
ipAddress: getEnvVar('OLLAMA_IP', '127.0.0.1'), // default ollama ip if none
portAddress: getEnvVar('OLLAMA_PORT', '11434'), // default ollama port if none
defaultModel: getEnvVar('MODEL', 'llama3.2')
defaultModel: getEnvVar('MODEL', 'llama3.2'),
redisHost: getEnvVar('REDIS_IP', '172.18.0.4'), // default redis host if none
redisPort: parseInt(getEnvVar('REDIS_PORT', '6379')) // default redis port if none
} as const // readonly keys
export default Keys
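
`getEnvVar` itself is not shown in this diff; a helper with the shape used above typically looks something like this sketch (an assumption about its implementation, not the project's actual code):

// assumed implementation of the getEnvVar helper referenced above
function getEnvVar(name: string, fallback?: string): string {
    const value = process.env[name] ?? fallback
    if (value === undefined)
        throw new Error(`Missing required environment variable: ${name}`)
    return value
}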


@@ -1,4 +1,4 @@
import { ChatInputCommandInteraction, ChatInputApplicationCommandData, Client, ApplicationCommandOption } from 'discord.js'
import { CommandInteraction, ChatInputApplicationCommandData, Client, ApplicationCommandOption } from 'discord.js'
/**
* interface for how slash commands should be run
@@ -6,7 +6,7 @@ import { ChatInputCommandInteraction, ChatInputApplicationCommandData, Client, A
export interface SlashCommand extends ChatInputApplicationCommandData {
run: (
client: Client,
interaction: ChatInputCommandInteraction,
interaction: CommandInteraction,
options?: ApplicationCommandOption[]
) => void
}


@@ -1,6 +1,6 @@
import type { ClientEvents, Awaitable, Client } from 'discord.js'
import { Ollama } from 'ollama'
import { Queue } from '../components/index.js'
import { Queue } from '../queues/queue.js'
// Export events through here to reduce amount of imports
export { Events } from 'discord.js'


@@ -64,7 +64,7 @@ export async function clearChannelInfo(filename: string, channel: TextChannel, u
* @param user the user's name
* @param messages their messages
*/
export async function openChannelInfo(this: any, filename: string, channel: TextChannel | ThreadChannel, user: string, messages: UserMessage[] = []): Promise<void> {
export async function openChannelInfo(filename: string, channel: TextChannel | ThreadChannel, user: string, messages: UserMessage[] = []): Promise<void> {
const fullFileName = `data/${filename}-${user}.json`
if (fs.existsSync(fullFileName)) {
fs.readFile(fullFileName, 'utf8', (error, data) => {
@@ -95,7 +95,7 @@ export async function openChannelInfo(this: any, filename: string, channel: Text
// only creating it, no need to add anything
fs.writeFileSync(fullFileName, JSON.stringify(object, null, 2))
console.log(`[Util: ${this.name}] Created '${fullFileName}' in working directory`)
console.log(`[Util: openChannelInfo] Created '${fullFileName}' in working directory`)
}
}


@@ -10,7 +10,7 @@ import path from 'path'
* @param value new value to assign
*/
// add type of change (server, user)
export function openConfig(this: any, filename: string, key: string, value: any) {
export function openConfig(filename: string, key: string, value: any) {
const fullFileName = `data/${filename}`
// check if the file exists, if not then make the config file
@@ -41,7 +41,7 @@ export function openConfig(this: any, filename: string, key: string, value: any)
fs.mkdirSync(directory, { recursive: true })
fs.writeFileSync(`data/${filename}`, JSON.stringify(object, null, 2))
console.log(`[Util: ${this.name}] Created '${filename}' in working directory`)
console.log(`[Util: openConfig] Created '${filename}' in working directory`)
}
}


@@ -1,5 +1,6 @@
import { ChatResponse, AbortableAsyncIterator } from "ollama"
import { ChatResponse } from "ollama"
import { ChatParams } from "../index.js"
import { AbortableAsyncIterator } from "ollama/src/utils.js"
/**
* Method to query the Ollama client for async generation


@@ -1,7 +1,8 @@
import { Message, SendableChannels } from 'discord.js'
import { ChatResponse, Ollama, AbortableAsyncIterator } from 'ollama'
import { ChatResponse, Ollama } from 'ollama'
import { ChatParams, UserMessage, streamResponse, blockResponse } from './index.js'
import { Queue } from '../components/index.js'
import { Queue } from '../queues/queue.js'
import { AbortableAsyncIterator } from 'ollama/src/utils.js'
/**
* Method to send replies as normal text on discord like any other user
@@ -10,7 +11,6 @@ import { Queue } from '../components/index.js'
* @param msgHist message history between user and model
*/
export async function normalMessage(
this: any,
message: Message,
ollama: Ollama,
model: string,
@@ -73,7 +73,7 @@ export async function normalMessage(
sentMessage.edit(result)
}
} catch (error: any) {
console.log(`[Util: ${this.name}] Error creating message: ${error.message}`)
console.log(`[Util: messageNormal] Error creating message: ${error.message}`)
if (error.message.includes('try pulling it first'))
sentMessage.edit(`**Response generation failed.**\n\nReason: You do not have the ${model} downloaded. Ask an admin to pull it using the \`pull-model\` command.`)
else


@@ -5,9 +5,16 @@ import { describe, expect, it, vi } from 'vitest'
import commands from '../src/commands/index.js'
/**
* Mocking client.ts because of the commands
* Mocking redis found in client.ts because of the commands
*/
vi.mock('../src/client.js', () => ({}))
vi.mock('../src/client.js', () => ({
redis: {
createClient: vi.fn(),
connect: vi.fn(),
get: vi.fn(),
set: vi.fn()
}
}))
/**
* Commands test suite, tests the commands object


@@ -1,5 +1,5 @@
import { describe, expect, it } from 'vitest'
import { Queue } from '../src/components/index.js'
import { Queue } from '../src/queues/queue.js'
/**
* Queue test suite, tests the Queue class
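
For context, `Queue` is instantiated as `new Queue<UserMessage>` in client.ts; a minimal generic FIFO queue consistent with that usage might look like the sketch below (the project's actual class may differ, e.g. by capping its size to the message-history limit):

// minimal sketch of a generic FIFO queue matching the usage above
export class Queue<T> {
    private items: T[] = []

    // add an item to the back of the queue
    enqueue(item: T): void {
        this.items.push(item)
    }

    // remove and return the front item, or undefined when empty
    dequeue(): T | undefined {
        return this.items.shift()
    }

    // current number of queued items
    size(): number {
        return this.items.length
    }
}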