diff --git a/.env.sample b/.env.sample
index 9e71d51..7e4dd37 100644
--- a/.env.sample
+++ b/.env.sample
@@ -7,12 +7,18 @@ GUILD_ID = GUILD_ID
 # Channel where the bot listens to messages
 CHANNEL_ID = CHANNEL_ID
 
-# model for the bot to query from (i.e. llama2 [llama2:13b], mistral, ... )
+# model for the bot to query from (e.g. llama2 [llama2:13b], mistral, codellama, etc.)
 MODEL = MODEL_NAME
 
 # discord bot user id for mentions
 CLIENT_UID = BOT_USER_ID
 
-# ip address of docker container, this will have to be found manually (docker can also change it too)
+# ip/port address of the ollama docker container; I use 172.18.X.X for docker, 127.0.0.1 for local
 OLLAMA_IP = IP_ADDRESS
-OLLAMA_PORT = PORT
\ No newline at end of file
+OLLAMA_PORT = PORT
+
+# ip address for the discord bot container; I use 172.18.X.X (use a different IP than OLLAMA_IP)
+DISCORD_IP = IP_ADDRESS
+
+# subnet address, e.g. 172.18.0.0, since we use a /16 mask
+SUBNET_ADDRESS = ADDRESS
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index e4f0ec2..f4a9ce6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,9 @@
 # Created by https://www.toptal.com/developers/gitignore/api/node
 # Edit at https://www.toptal.com/developers/gitignore?templates=node
 
+# config
+config.json
+
 # builds
 build/
 dist/
diff --git a/Dockerfile b/Dockerfile
index 1c3e12f..540a9cd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,6 +7,7 @@ WORKDIR /app
 # copy package.json and the lock file into the container, and src files
 COPY ./src ./src
 COPY ./*.json ./
+COPY ./.env ./
 
 # install dependencies, breaks
 RUN npm install
diff --git a/README.md b/README.md
index 788d95e..17be936 100644
--- a/README.md
+++ b/README.md
@@ -1,39 +1,14 @@
 # Discord Ollama Integration [![License: CC BY-NC 4.0](https://img.shields.io/badge/License-CC_BY--NC_4.0-darkgreen.svg)](https://creativecommons.org/licenses/by-nc/4.0/) [![Release Badge](https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github)](https://github.com/kevinthedang/discord-ollama/releases/latest)
 
 Ollama is an AI model management tool that allows users to install and use custom large language models locally. The goal is to create a discord bot that will utilize Ollama and chat with it on a Discord server!
 
-## Ollama Setup
-* Go to Ollama's [Linux download page](https://ollama.ai/download/linux) and run the simple curl command they provide. The command should be `curl https://ollama.ai/install.sh | sh`.
-* Now the the following commands in separate terminals to test out how it works!
-  * In terminal 1 -> `ollama serve` to setup ollama
-  * In terminal 2 -> `ollama run [model name]`, for example `ollama run llama2`
-  * The models can vary as you can create your own model. You can also view ollama's [library](https://ollama.ai/library) of models.
-  * This can also be done in [wsl](https://learn.microsoft.com/en-us/windows/wsl/install) for Windows machines.
-* You can now interact with the model you just ran (it might take a second to startup).
-  * Response time varies with processing power!
-
-## Project Setup
+## Environment Setup
 * Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo.
 * You will need a `.env` file in the root of the project directory with the bot's token. A `.env.sample` is provided for you as a reference for the required environment variables.
 * For example, `CLIENT_TOKEN = [Bot Token]`
-
-## To Run (with Docker)
-* Follow this guide to setup [Docker](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04)
-  * If on Windows, download [Docker Desktop](https://docs.docker.com/desktop/install/windows-install/) to get the docker engine.
-* You will need a model in the container for this to work properly, on Docker Desktop go to the `Containers` tab, select the `ollama` container, and select `Exec` to run as root on your container. Now, run `ollama pull [model name]` to get your model.
-  * For Linux Servers, you need another shell to pull the model, or if you run `docker-compose build && docker-compose up -d`, then it will run in the background to keep your shell. Run `docker exec -it ollama bash` to get into the container and run the samme pull command above.
-* There is no need to install any npm packages for this, you just need to run `npm run start` to pull the containers and spin them up.
-* For cleaning up on Linux (or Windows), run the following commands:
-  * `docker-compose stop`
-  * `docker-compose rm`
-  * `docker ps` to check if containers have been removed.
-
-## To Run Locally (without Docker)
-* Run `npm install` to install the npm packages.
-* Now, you can run the bot by running `npm run client` which will build and run the decompiled typescript and run the setup for ollama.
-  * **IMPORTANT**: This must be ran in the wsl/Linux instance to work properly! Using Command Prompt/Powershell/Git Bash/etc. will not work on Windows (at least in my experience).
-  * Refer to the [resources](#resources) on what node version to use.
-* Open up a separate terminal/shell (you will need wsl for this if on windows) and run `ollama serve` to startup ollama.
-  * If you do not have a model, you will need to run `ollama pull [model name]` in a separate terminal to get it.
+* Please refer to the docs for bot setup. **NOTE**: These guides assume you already know how to set up a bot account for Discord.
+  * [Local Machine Setup](./docs/setup-local.md)
+  * [Docker Setup for Servers and Local Machines](./docs/setup-docker.md)
+  * Local (non-Docker) use is not recommended.
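+* A filled-in example `.env` (every value below is a placeholder or a suggested default from the [Docker setup guide](./docs/setup-docker.md); substitute your own):
+  ```
+  CLIENT_TOKEN = YOUR_BOT_TOKEN
+  GUILD_ID = YOUR_GUILD_ID
+  CHANNEL_ID = YOUR_CHANNEL_ID
+  MODEL = llama2
+  CLIENT_UID = YOUR_BOT_USER_ID
+  OLLAMA_IP = 172.18.0.2
+  OLLAMA_PORT = 11434
+  DISCORD_IP = 172.18.0.3
+  SUBNET_ADDRESS = 172.18.0.0
+  ```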
 
 ## Resources
 * [NodeJS](https://nodejs.org/en)
diff --git a/docker-compose.yml b/docker-compose.yml
index 21fbf65..a37bb93 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -8,6 +8,7 @@ services:
     build: ./ # find docker file in designated path
     container_name: discord
     restart: always # rebuild container always
+    image: discord/bot:0.2.0
     environment:
       CLIENT_TOKEN: ${CLIENT_TOKEN}
       GUILD_ID: ${GUILD_ID}
@@ -17,7 +18,8 @@
       OLLAMA_IP: ${OLLAMA_IP}
       OLLAMA_PORT: ${OLLAMA_PORT}
     networks:
-      - ollama-net
+      ollama-net:
+        ipv4_address: ${DISCORD_IP}
     volumes:
       - discord:/src/app # docker will not make this for you, make it yourself
 
@@ -27,18 +29,25 @@
     container_name: ollama
     restart: always
     networks:
-      - ollama-net
+      ollama-net:
+        ipv4_address: ${OLLAMA_IP}
+
+    # runtime: nvidia # use Nvidia Container Toolkit for GPU support
     # devices:
     #   - /dev/nvidia0
     volumes:
       - ollama:/root/.ollama
     ports:
-      - 11434:11434
+      - ${OLLAMA_PORT}:${OLLAMA_PORT}
 
+# create a network that supports giving addresses within a specific subnet
 networks:
   ollama-net:
     driver: bridge
+    ipam:
+      driver: default
+      config:
+        - subnet: ${SUBNET_ADDRESS}/16
 
 volumes:
   ollama:
diff --git a/docs/setup-docker.md b/docs/setup-docker.md
new file mode 100644
index 0000000..7e7246a
--- /dev/null
+++ b/docs/setup-docker.md
@@ -0,0 +1,37 @@
+## Docker Setup
+* Follow this guide to set up [Docker](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04)
+  * If on Windows, download [Docker Desktop](https://docs.docker.com/desktop/install/windows-install/) to get the docker engine.
+* Please also install [Docker Compose](https://docs.docker.com/compose/install/linux/) for easy running. If not, there are [scripts](#manual-run-with-docker) to set everything up.
+
+## To Run (with Docker and Docker Compose)
+* With the inclusion of subnets in the `docker-compose.yml`, you will need to set the `SUBNET_ADDRESS`, `OLLAMA_IP`, `OLLAMA_PORT`, and `DISCORD_IP`. Here are some default values if you don't want to pick your own:
+  * `OLLAMA_IP = 172.18.0.2`
+  * `OLLAMA_PORT = 11434`
+  * `DISCORD_IP = 172.18.0.3`
+  * `SUBNET_ADDRESS = 172.18.0.0`
+  * Don't understand any of this? Watch a networking video to understand subnetting.
+* You will need a model in the container for this to work properly. On Docker Desktop, go to the `Containers` tab, select the `ollama` container, and select `Exec` to run as root on your container. Now, run `ollama pull [model name]` to get your model.
+  * For Linux servers, you need another shell to pull the model; alternatively, if you run `docker compose build && docker compose up -d`, it will run in the background and keep your shell free. Run `docker exec -it ollama bash` to get into the container and run the same pull command above.
+* Otherwise, there is no need to install any npm packages for this; you just need to run `npm run start` to pull the containers and spin them up.
+* For cleaning up on Linux (or Windows), run the following commands:
+  * `docker compose stop`
+  * `docker compose rm`
+  * `docker ps` to check if the containers have been removed.
+* You can also use `npm run clean` to clean up the containers and remove the network, which addresses a possible `Address already in use` problem.
+
+## Manual Run (with Docker)
+* Run the following commands:
+  * `npm run docker:build`
+  * `npm run docker:ollama`
+  * `npm run docker:client`
+  * `docker ps` to see if the containers are there!
+    * Names should be **discord** and **ollama**.
+  * You can also just run `npm run docker:start` to run all of the above commands.
+* Clean-up:
+  * `docker ps` for the container IDs. Use the `-a` flag as necessary.
+  * `docker rm -f discord && docker rm -f ollama` to remove the containers.
+  * `docker rm -f CONTAINER_ID`, run for both containers, if naming issues arise.
+  * `docker network rm ollama-net` removes the network.
+  * `docker network prune` will also work so long as the network is unused.
+* Remove Image:
+  * If you need to remove the image, run `docker image rm IMAGE_ID`. You can get the image ID by running `docker images`.
\ No newline at end of file
diff --git a/docs/setup-local.md b/docs/setup-local.md
new file mode 100644
index 0000000..9dede13
--- /dev/null
+++ b/docs/setup-local.md
@@ -0,0 +1,19 @@
+## Ollama Setup
+* Go to Ollama's [Linux download page](https://ollama.ai/download/linux) and run the simple curl command they provide. The command should be `curl https://ollama.ai/install.sh | sh`.
+* Now run the following commands in separate terminals to test out how it works!
+  * In terminal 1 -> `ollama serve` to start up ollama
+  * In terminal 2 -> `ollama run [model name]`, for example `ollama run llama2`
+  * The models can vary, as you can create your own model. You can also view Ollama's [library](https://ollama.ai/library) of models.
+  * If there are any issues running ollama because of missing LLMs, run `ollama pull [model name]`; it will pull the model if Ollama has it in their library.
+  * This can also be done in [wsl](https://learn.microsoft.com/en-us/windows/wsl/install) for Windows machines.
+* You can now interact with the model you just ran (it might take a second to start up).
+  * Response time varies with processing power!
+
+## To Run Locally (without Docker)
+* Run `npm install` to install the npm packages.
+* Ensure that your [.env](../.env.sample) file's `OLLAMA_IP` is `127.0.0.1` for local use to work properly.
+* Now, you can run the bot by running `npm run client`, which will build and run the compiled TypeScript and run the setup for Ollama.
+  * **IMPORTANT**: This must be run in a WSL/Linux instance to work properly! Using Command Prompt/PowerShell/Git Bash/etc. will not work on Windows (at least in my experience).
+  * Refer to the [resources](../README.md#resources) for which Node version to use.
+* Open up a separate terminal/shell (you will need WSL for this if on Windows) and run `ollama serve` to start up Ollama.
+  * If you do not have a model, you will need to run `ollama pull [model name]` in a separate terminal to get it.
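+* To verify Ollama is reachable before starting the bot, you can query its REST API; the `/api/tags` endpoint lists the models you have pulled. This assumes Ollama is on the default `11434` port:
+  ```bash
+  curl http://127.0.0.1:11434/api/tags
+  ```
+  * If the `models` array in the response is empty, pull a model first with `ollama pull [model name]`.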
\ No newline at end of file
diff --git a/package-lock.json b/package-lock.json
index 87f27fa..ea4ff98 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "discord-ollama",
-  "version": "0.2.0",
+  "version": "0.3.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "discord-ollama",
-      "version": "0.2.0",
+      "version": "0.3.0",
       "license": "ISC",
       "dependencies": {
         "axios": "^1.6.2",
@@ -504,17 +504,17 @@
       }
     },
     "node_modules/@fastify/busboy": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.0.tgz",
-      "integrity": "sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==",
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.1.tgz",
+      "integrity": "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==",
       "engines": {
         "node": ">=14"
       }
     },
     "node_modules/@jridgewell/resolve-uri": {
-      "version": "3.1.1",
-      "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz",
-      "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==",
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+      "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
       "dev": true,
       "engines": {
         "node": ">=6.0.0"
@@ -591,9 +591,9 @@
       "dev": true
     },
     "node_modules/@types/node": {
-      "version": "20.11.14",
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.14.tgz",
-      "integrity": "sha512-w3yWCcwULefjP9DmDDsgUskrMoOy5Z8MiwKHr1FvqGPtx7CvJzQvxD7eKpxNtklQxLruxSXWddyeRtyud0RcXQ==",
+      "version": "20.11.28",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.28.tgz",
+      "integrity": "sha512-M/GPWVS2wLkSkNHVeLkrF2fD5Lx5UC4PxA0uZcKc6QqbIQUJyW1jVjueJYi1z8n0I5PxYrtpnPnWglE+y9A0KA==",
       "dependencies": {
         "undici-types": "~5.26.4"
       }
@@ -667,11 +667,11 @@
       "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
     },
     "node_modules/axios": {
-      "version": "1.6.7",
-      "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz",
-      "integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==",
+      "version": "1.6.8",
+      "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.8.tgz",
+      "integrity": "sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==",
       "dependencies": {
-        "follow-redirects": "^1.15.4",
+        "follow-redirects": "^1.15.6",
         "form-data": "^4.0.0",
         "proxy-from-env": "^1.1.0"
       }
@@ -683,12 +683,15 @@
       "dev": true
     },
     "node_modules/binary-extensions": {
-      "version": "2.2.0",
-      "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
-      "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
+      "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
       "dev": true,
       "engines": {
         "node": ">=8"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
       }
     },
     "node_modules/brace-expansion": {
@@ -714,16 +717,10 @@
       }
     },
     "node_modules/chokidar": {
-      "version": "3.5.3",
-      "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
-      "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
+      "version": "3.6.0",
+      "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
+      "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
       "dev": true,
-      "funding": [
-        {
-          "type": "individual",
-          "url": "https://paulmillr.com/funding/"
-        }
-      ],
       "dependencies": {
         "anymatch": "~3.1.2",
         "braces": "~3.0.2",
@@ -736,6 +733,9 @@
       "engines": {
         "node": ">= 8.10.0"
       },
+      "funding": {
+        "url": "https://paulmillr.com/funding/"
+      },
       "optionalDependencies": {
         "fsevents": "~2.3.2"
       }
@@ -827,14 +827,14 @@
       }
     },
     "node_modules/dotenv": {
-      "version": "16.4.1",
-      "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.1.tgz",
-      "integrity": "sha512-CjA3y+Dr3FyFDOAMnxZEGtnW9KBR2M0JvvUtXNW+dYJL5ROWxP9DUHCwgFqpMk0OXCc0ljhaNTr2w/kutYIcHQ==",
+      "version": "16.4.5",
+      "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz",
+      "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==",
       "engines": {
         "node": ">=12"
       },
       "funding": {
-        "url": "https://github.com/motdotla/dotenv?sponsor=1"
+        "url": "https://dotenvx.com"
       }
     },
     "node_modules/esbuild": {
@@ -893,9 +893,9 @@
       }
     },
     "node_modules/follow-redirects": {
-      "version": "1.15.5",
-      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz",
-      "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==",
+      "version": "1.15.6",
+      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
+      "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
       "funding": [
         {
           "type": "individual",
@@ -939,9 +939,9 @@
       }
     },
     "node_modules/get-tsconfig": {
-      "version": "4.7.2",
-      "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.2.tgz",
-      "integrity": "sha512-wuMsz4leaj5hbGgg4IvDU0bqJagpftG5l5cXIAvo8uZrqn0NJqwtfupTN00VnkQJPcIRrxYrm1Ue24btpCha2A==",
+      "version": "4.7.3",
+      "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.3.tgz",
+      "integrity": "sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg==",
       "dev": true,
       "dependencies": {
         "resolve-pkg-maps": "^1.0.0"
@@ -962,6 +962,15 @@
         "node": ">= 6"
       }
     },
+    "node_modules/has-flag": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+      "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+      "dev": true,
+      "engines": {
+        "node": ">=4"
+      }
+    },
     "node_modules/ignore-by-default": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz",
@@ -1033,9 +1042,9 @@
       }
     },
     "node_modules/magic-bytes.js": {
-      "version": "1.8.0",
-      "resolved": "https://registry.npmjs.org/magic-bytes.js/-/magic-bytes.js-1.8.0.tgz",
-      "integrity": "sha512-lyWpfvNGVb5lu8YUAbER0+UMBTdR63w2mcSUlhhBTyVbxJvjgqwyAf3AZD6MprgK0uHuBoWXSDAMWLupX83o3Q=="
+      "version": "1.10.0",
+      "resolved": "https://registry.npmjs.org/magic-bytes.js/-/magic-bytes.js-1.10.0.tgz",
+      "integrity": "sha512-/k20Lg2q8LE5xiaaSkMXk4sfvI+9EGEykFS4b0CHHGWqDYU0bGUFSwchNOMA56D7TCs9GwVTkqe9als1/ns8UQ=="
     },
     "node_modules/make-error": {
       "version": "1.3.6",
@@ -1081,9 +1090,9 @@
       "dev": true
     },
     "node_modules/nodemon": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.0.3.tgz",
-      "integrity": "sha512-7jH/NXbFPxVaMwmBCC2B9F/V6X1VkEdNgx3iu9jji8WxWcvhMWkmhNWhI5077zknOnZnBzba9hZP6bCPJLSReQ==",
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.1.0.tgz",
+      "integrity": "sha512-xqlktYlDMCepBJd43ZQhjWwMw2obW/JRvkrLxq5RCNcuDDX1DbcPT+qT1IlIIdf+DhnWs90JpTMe+Y5KxOchvA==",
       "dev": true,
       "dependencies": {
         "chokidar": "^3.5.2",
@@ -1108,27 +1117,6 @@
         "url": "https://opencollective.com/nodemon"
       }
     },
-    "node_modules/nodemon/node_modules/has-flag": {
-      "version": "3.0.0",
-      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
-      "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
-      "dev": true,
-      "engines": {
-        "node": ">=4"
-      }
-    },
-    "node_modules/nodemon/node_modules/supports-color": {
-      "version": "5.5.0",
-      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
-      "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
-      "dev": true,
-      "dependencies": {
-        "has-flag": "^3.0.0"
-      },
-      "engines": {
-        "node": ">=4"
-      }
-    },
     "node_modules/nopt": {
       "version": "1.0.10",
       "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz",
@@ -1154,9 +1142,9 @@
       }
     },
    "node_modules/ollama": {
-      "version": "0.4.6",
-      "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.4.6.tgz",
-      "integrity": "sha512-/Im2atcM9hAxOgEi7mc5pG2G+MeN4jFo1bubfCzAd8bZT6nQ3he5tr+jypGufau9+WQKY0MHhTajqKTNfnlZQA==",
+      "version": "0.4.9",
+      "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.4.9.tgz",
+      "integrity": "sha512-hVf5xix5zwswfMN6ydt3K2TS886aOk9RT9xksajXxdIHhi2yokEsdzJYE7IGg+1bjQSPZ6JHzHbESYjNB0pTCg==",
       "dependencies": {
         "whatwg-fetch": "^3.6.20"
       }
@@ -1206,9 +1194,9 @@
       }
     },
     "node_modules/semver": {
-      "version": "7.5.4",
-      "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
-      "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+      "version": "7.6.0",
+      "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+      "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
       "dev": true,
       "dependencies": {
         "lru-cache": "^6.0.0"
@@ -1232,6 +1220,18 @@
         "node": ">=10"
       }
     },
+    "node_modules/supports-color": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+      "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+      "dev": true,
+      "dependencies": {
+        "has-flag": "^3.0.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
     "node_modules/to-regex-range": {
       "version": "5.0.1",
       "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -1257,9 +1257,9 @@
       }
     },
     "node_modules/ts-mixer": {
-      "version": "6.0.3",
-      "resolved": "https://registry.npmjs.org/ts-mixer/-/ts-mixer-6.0.3.tgz",
-      "integrity": "sha512-k43M7uCG1AkTyxgnmI5MPwKoUvS/bRvLvUb7+Pgpdlmok8AoqmUaZxUUw8zKM5B1lqZrt41GjYgnvAi0fppqgQ=="
+      "version": "6.0.4",
+      "resolved": "https://registry.npmjs.org/ts-mixer/-/ts-mixer-6.0.4.tgz",
+      "integrity": "sha512-ufKpbmrugz5Aou4wcr5Wc1UUFWOLhq+Fm6qa6P0w0K5Qw2yhaUoiWszhCVuNQyNwrlGiscHOmqYoAox1PtvgjA=="
     },
     "node_modules/ts-node": {
       "version": "10.9.2",
@@ -1310,9 +1310,9 @@
"sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" }, "node_modules/tsx": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.7.0.tgz", - "integrity": "sha512-I+t79RYPlEYlHn9a+KzwrvEwhJg35h/1zHsLC2JXvhC2mdynMv6Zxzvhv5EMV6VF5qJlLlkSnMVvdZV3PSIGcg==", + "version": "4.7.1", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.7.1.tgz", + "integrity": "sha512-8d6VuibXHtlN5E3zFkgY8u4DX7Y3Z27zvvPKVmLon/D4AjuKzarkUBTLDBgj9iTQ0hg5xM7c/mYiRVM+HETf0g==", "dev": true, "dependencies": { "esbuild": "~0.19.10", @@ -1329,9 +1329,9 @@ } }, "node_modules/typescript": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", - "integrity": "sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", + "version": "5.4.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.2.tgz", + "integrity": "sha512-+2/g0Fds1ERlP6JsakQQDXjZdZMM+rqpamFZJEKh4kwTIn3iDkgKtby0CeNd5ATNZ4Ry1ax15TMx0W2V+miizQ==", "dev": true, "bin": { "tsc": "bin/tsc", diff --git a/package.json b/package.json index 414e076..9f911ac 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "discord-ollama", - "version": "0.2.0", + "version": "0.3.0", "description": "Ollama Integration into discord", "main": "build/index.js", "exports": "./build/index.js", @@ -9,9 +9,15 @@ "dev-mon": "nodemon --config nodemon.json src/index.ts", "build": "tsc", "prod": "node .", - "client": "npm i && npm run build && npm run prod", - "clean": "docker rmi $(docker images -a -q) -f && docker images -a", - "start": "echo \"y\" | docker-compose rm && docker-compose build --no-cache && docker-compose up" + "client": "npm run build && npm run prod", + "clean": "docker compose down && docker rmi $(docker images | grep 0.2.0 | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)", + "start": "docker compose build --no-cache && docker compose up -d", + "docker:start": "npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama", + "docker:clean": "docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)", + "docker:network": "docker network create --subnet=172.18.0.0/16 ollama-net", + "docker:build": "docker build --no-cache -t discord/bot:0.2.0 .", + "docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 discord", + "docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest" }, "author": "Kevin Dang", "license": "ISC", diff --git a/src/commands/index.ts b/src/commands/index.ts index f7609d4..6dcfb8d 100644 --- a/src/commands/index.ts +++ b/src/commands/index.ts @@ -1,6 +1,10 @@ import { SlashCommand } from '../utils/commands.js' import { ThreadCreate } from './threadCreate.js' +import { MessageStyle } from './messageStyle.js' +import { MessageStream } from './messageStream.js' export default [ - ThreadCreate + ThreadCreate, + MessageStyle, + MessageStream ] as SlashCommand[] \ No newline at end of file diff --git a/src/commands/messageStream.ts b/src/commands/messageStream.ts new file mode 100644 index 0000000..4e1965d --- /dev/null +++ b/src/commands/messageStream.ts @@ -0,0 +1,33 @@ +import { ApplicationCommandOptionType, ChannelType, Client, CommandInteraction } from 'discord.js' +import { SlashCommand } from '../utils/commands.js' +import { 
+
+export const MessageStream: SlashCommand = {
+    name: 'message-stream',
+    description: 'change preference on message streaming from ollama. WARNING: can be very slow.',
+
+    // user option(s) for setting stream
+    options: [
+        {
+            name: 'stream',
+            description: 'enable or disable stream preference',
+            type: ApplicationCommandOptionType.Boolean,
+            required: true
+        }
+    ],
+
+    // change preferences based on command
+    run: async (client: Client, interaction: CommandInteraction) => {
+        // verify channel
+        const channel = await client.channels.fetch(interaction.channelId)
+        if (!channel || channel.type !== ChannelType.GuildText) return
+
+        // save value to json and write to it
+        openFile('config.json', interaction.commandName, interaction.options.get('stream')?.value)
+
+        interaction.reply({
+            content: `Message streaming preference set to: \`${interaction.options.get('stream')?.value}\``,
+            ephemeral: true
+        })
+    }
+}
\ No newline at end of file
diff --git a/src/commands/messageStyle.ts b/src/commands/messageStyle.ts
new file mode 100644
index 0000000..023fae8
--- /dev/null
+++ b/src/commands/messageStyle.ts
@@ -0,0 +1,33 @@
+import { ChannelType, Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js'
+import { SlashCommand } from '../utils/commands.js'
+import { openFile } from '../utils/jsonHandler.js'
+
+export const MessageStyle: SlashCommand = {
+    name: 'message-style',
+    description: 'sets the message style to embed or normal',
+
+    // set available user options to pass to the command
+    options: [
+        {
+            name: 'embed',
+            description: 'toggle embedded or normal message',
+            type: ApplicationCommandOptionType.Boolean,
+            required: true
+        }
+    ],
+
+    // Query for message information and set the style
+    run: async (client: Client, interaction: CommandInteraction) => {
+        // fetch channel and message
+        const channel = await client.channels.fetch(interaction.channelId)
+        if (!channel || channel.type !== ChannelType.GuildText) return
+
+        // set the message style
+        openFile('config.json', interaction.commandName, interaction.options.get('embed')?.value)
+
+        interaction.reply({
+            content: `Message style preference for embed set to: \`${interaction.options.get('embed')?.value}\``,
+            ephemeral: true
+        })
+    }
+}
\ No newline at end of file
diff --git a/src/commands/threadCreate.ts b/src/commands/threadCreate.ts
index c3ffc72..ce67a6a 100644
--- a/src/commands/threadCreate.ts
+++ b/src/commands/threadCreate.ts
@@ -17,11 +17,11 @@ export const ThreadCreate: SlashCommand = {
         })
 
         // Send a message in the thread
-        thread.send(`**User:** ${interaction.user}`)
+        thread.send(`**User:** ${interaction.user} \n**People in Conversation:** ${thread.memberCount}`)
 
         // user only reply
         return interaction.reply({
-            content: 'I can help you in the Thread below.',
+            content: `I can help you in the Thread below. \n**Thread ID:** ${thread.id}`,
             ephemeral: true
         })
     }
diff --git a/src/events/interactionCreate.ts b/src/events/interactionCreate.ts
index d7f07e0..a8ade72 100644
--- a/src/events/interactionCreate.ts
+++ b/src/events/interactionCreate.ts
@@ -8,7 +8,7 @@ import commands from '../commands/index.js'
 export default event(Events.InteractionCreate, async ({ log, client }, interaction) => {
     if (!interaction.isCommand() || !interaction.isChatInputCommand()) return
 
-    log(`Interaction called \'${interaction.commandName}\' from ${interaction.client.user.tag}.`)
+    log(`Interaction called \'${interaction.commandName}\' from ${interaction.user.tag}.`)
 
     // ensure command exists, otherwise kill event
     const command = commands.find(command => command.name === interaction.commandName)
diff --git a/src/events/messageCreate.ts b/src/events/messageCreate.ts
index bb01e51..6abbf21 100644
--- a/src/events/messageCreate.ts
+++ b/src/events/messageCreate.ts
@@ -1,11 +1,13 @@
-import { embedMessage, event, Events } from '../utils/index.js'
+import { ChatResponse } from 'ollama'
+import { embedMessage, event, Events, normalMessage } from '../utils/index.js'
+import { Configuration, getConfig } from '../utils/jsonHandler.js'
 
 /**
  * Max Message length for free users is 2000 characters (bot or not).
  * @param message the message received from the channel
 */
 export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama }, message) => {
-    log(`Message created \"${message.content}\" from ${message.author.tag}.`)
+    log(`Message \"${message.content}\" from ${message.author.tag} in channel/thread ${message.channelId}.`)
 
     // Hard-coded channel to test output there only, in our case "ollama-endpoint"
     if (message.channelId != tokens.channel) return
@@ -22,18 +24,36 @@
         content: message.content
     })
 
-    // Try to query and send embed
-    const response = await embedMessage(message, ollama, tokens, msgHist)
+    // Try to query and send a response (embed or normal, based on config)
+    try {
+        const config: Configuration = await new Promise((resolve, reject) => {
+            getConfig('config.json', (config) => {
+                if (config === undefined) {
+                    reject(new Error('No Configuration is set up.'))
+                    return
+                }
+                resolve(config)
+            })
+        })
 
-    // Try to query and send message
-    // log(normalMessage(message, tokens, msgHist))
+        let response: ChatResponse
 
-    // If something bad happened, remove user query and stop
-    if (response == undefined) { msgHist.pop(); return }
+        // true means use embed, undefined or false means use normal
+        if (config.options['message-style'])
+            response = await embedMessage(message, ollama, tokens, msgHist)
+        else
+            response = await normalMessage(message, ollama, tokens, msgHist)
 
-    // successful query, save it as history
-    msgHist.push({
-        role: 'assistant',
-        content: response.message.content
-    })
+        // If something bad happened, remove user query and stop
+        if (response == undefined) { msgHist.pop(); return }
+
+        // successful query, save it as history
+        msgHist.push({
+            role: 'assistant',
+            content: response.message.content
+        })
+    } catch (error: any) {
+        msgHist.pop() // remove message because of failure
+        message.reply(`**Response generation failed.**\n\nReason: ${error.message}\n\nPlease use any config slash command.`)
+    }
 })
\ No newline at end of file
diff --git a/src/utils/commands.ts b/src/utils/commands.ts
index 24af1c7..42f119b 100644
--- a/src/utils/commands.ts
+++ b/src/utils/commands.ts
@@ -1,10 +1,14 @@
-import { CommandInteraction, ChatInputApplicationCommandData, Client } from 'discord.js'
+import { CommandInteraction, ChatInputApplicationCommandData, Client, ApplicationCommandOption } from 'discord.js'
 
 /**
  * interface for how slash commands should be run
 */
 export interface SlashCommand extends ChatInputApplicationCommandData {
-    run: (client: Client, interaction: CommandInteraction) => void
+    run: (
+        client: Client,
+        interaction: CommandInteraction,
+        options?: ApplicationCommandOption[]
+    ) => void
 }
 
 /**
@@ -16,7 +20,28 @@
     // ensure the bot is online before registering
     if (!client.application) return
 
+    // map commands into an array of names, used for checking registered commands
+    const commandsToRegister: string[] = commands.map(command => command.name)
+
+    // fetch all registered commands and delete any that are no longer defined locally
+    client.application.commands.fetch().then((fetchedCommands) => {
+        for (const command of fetchedCommands.values()) {
+            if (!commandsToRegister.includes(command.name)) {
+                command.delete().catch(console.error)
+                console.log(`[Command: ${command.name}] Removed from Discord`)
+            }
+        }
+    })
+
+    // clear the cache of the commands
+    client.application.commands.cache.clear()
+
     // iterate through all commands and register them with the bot
     for (const command of commands)
-        client.application.commands.create(command)
+        client.application.commands
+            .create(command)
+            .then((c) => {
+                console.log(`[Command: ${c.name}] Registered on Discord`)
+                c.options?.forEach((o) => console.log(`  - ${o.name}`))
+            })
 }
\ No newline at end of file
diff --git a/src/utils/jsonHandler.ts b/src/utils/jsonHandler.ts
new file mode 100644
index 0000000..dde619f
--- /dev/null
+++ b/src/utils/jsonHandler.ts
@@ -0,0 +1,56 @@
+import fs from 'fs'
+
+export interface Configuration {
+    readonly name: string
+    options: {
+        'message-stream'?: boolean,
+        'message-style'?: boolean
+    }
+}
+
+/**
+ * Method to open a file in the working directory and modify/create it
+ *
+ * @param filename name of the file
+ * @param key key value to access
+ * @param value new value to assign
+ */
+export function openFile(filename: string, key: string, value: any) {
+    // check if the file exists, if not then make the config file
+    if (fs.existsSync(filename)) {
+        fs.readFile(filename, 'utf8', (error, data) => {
+            if (error)
+                console.log(`[Error: openFile] Incorrect file format`)
+            else {
+                const object = JSON.parse(data)
+                object['options'][key] = value
+                fs.writeFileSync(filename, JSON.stringify(object, null, 2))
+            }
+        })
+    } else {
+        const object: Configuration = JSON.parse('{ \"name\": \"Discord Ollama Configurations\" }')
+
+        // set standard information for config file and options
+        object['options'] = {
+            [key]: value
+        }
+
+        fs.writeFileSync(filename, JSON.stringify(object, null, 2))
+        console.log(`[Util: openFile] Created 'config.json' in working directory`)
+    }
+}
+
+export async function getConfig(filename: string, callback: (config: Configuration | undefined) => void): Promise<void> {
+    // attempt to read the file and get the configuration
+    if (fs.existsSync(filename)) {
+        fs.readFile(filename, 'utf8', (error, data) => {
+            if (error) {
+                callback(undefined)
+                return // something went wrong... stop
+            }
+            callback(JSON.parse(data))
+        })
+    } else {
+        callback(undefined) // file not found
+    }
+}
\ No newline at end of file
diff --git a/src/utils/messageEmbed.ts b/src/utils/messageEmbed.ts
index ea14eda..fa3616f 100644
--- a/src/utils/messageEmbed.ts
+++ b/src/utils/messageEmbed.ts
@@ -52,10 +52,10 @@
         // edit the message
         sentMessage.edit({ embeds: [newEmbed] })
     } catch(error: any) {
-        console.log(`[Event: messageEmbed] Error creating message: ${error.message}`)
+        console.log(`[Util: messageEmbed] Error creating message: ${error.message}`)
         const errorEmbed = new EmbedBuilder()
             .setTitle(`Responding to ${message.author.tag}`)
-            .setDescription(`Issue creating response: ${error.message}`)
+            .setDescription(`**Response generation failed.**\n\nReason: ${error.message}`)
             .setColor('#00FF00')
 
         // send back error
diff --git a/src/utils/messageNormal.ts b/src/utils/messageNormal.ts
index 5286248..bcd94f0 100644
--- a/src/utils/messageNormal.ts
+++ b/src/utils/messageNormal.ts
@@ -1,5 +1,5 @@
 import { Message } from 'discord.js'
-import ollama, { ChatResponse } from 'ollama'
+import { ChatResponse, Ollama } from 'ollama'
 import { UserMessage } from './events.js'
 
 /**
@@ -8,8 +8,9 @@
 * @param tokens tokens to run query
 * @param msgHist message history between user and model
 */
-export function normalMessage(
-    message: Message,
+export async function normalMessage(
+    message: Message,
+    ollama: Ollama,
     tokens: {
         channel: string,
         model: string
@@ -19,7 +20,7 @@
    // bot's response
    let response: ChatResponse
 
-    message.reply('Generating Response . . .').then(async sentMessage => {
+    await message.reply('Generating Response . . .').then(async sentMessage => {
         try {
             // Attempt to query model for message response
             response = await ollama.chat({
@@ -37,7 +38,8 @@
             // edit the 'generic' response to new message
             sentMessage.edit(response.message.content)
         } catch(error: any) {
-            sentMessage.edit(error.error)
+            console.log(`[Util: messageNormal] Error creating message: ${error.message}`)
+            sentMessage.edit(`**Response generation failed.**\n\nReason: ${error.message}`)
         }
     })
diff --git a/src/utils/streamParse.ts b/src/utils/streamParse.ts
index 0490543..48e27c6 100644
--- a/src/utils/streamParse.ts
+++ b/src/utils/streamParse.ts
@@ -4,6 +4,8 @@ import { AxiosResponse } from 'axios'
 * When running a /api/chat stream, the output needs to be parsed into an array of objects
 * This method is used for development purposes and testing
 *
+ * This will not work as intended now that ollama-js is included; it needs to be modified to work with it
+ *
 * @param stream Axios response from Ollama
 */
 export function parseStream(stream: AxiosResponse) {