Compare commits
7 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 7f1326f93e | |
| | 359f46a450 | |
| | de15185cff | |
| | 1041f4ca0b | |
| | 06638fec1f | |
| | 32b12e93c0 | |
| | 89213c2d39 | |
@@ -10,11 +10,11 @@ MODEL = MODEL_NAME
# discord bot user id for mentions
CLIENT_UID = BOT_USER_ID

# ip/port address of docker container, I use 172.18.X.X for docker, 127.0.0.1 for local
# ip/port address of docker container, I use 172.18.0.3 for docker, 127.0.0.1 for local
OLLAMA_IP = IP_ADDRESS
OLLAMA_PORT = PORT

# ip address for discord bot container, I use 172.18.X.X, use different IP than ollama_ip
# ip address for discord bot container, I use 172.18.0.2, use different IP than ollama_ip
DISCORD_IP = IP_ADDRESS

# subnet address, ex. 172.18.0.0 as we use /16.
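The sample above only holds placeholder values. As a rough sketch of how these variables reach the bot (the repository's actual startup module is not part of this diff), they can be loaded with `dotenv`, which appears in the `package.json` changes further down; the fallback values here are assumptions taken from the setup docs below.

```ts
// Sketch only: variable names mirror .env.sample; fallbacks are assumptions from the docs
import 'dotenv/config'

const ollamaIp = process.env.OLLAMA_IP ?? '127.0.0.1'   // 172.18.0.3 when running in Docker
const ollamaPort = process.env.OLLAMA_PORT ?? '11434'

// A host string an Ollama client could be pointed at
console.log(`Ollama endpoint: http://${ollamaIp}:${ollamaPort}`)
```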
.github/ISSUE_TEMPLATE/bug_report.md (new file, 38 lines)
@@ -0,0 +1,38 @@
---
name: Bug report
about: Bug Report for Fixes/Improvements
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]

**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (new file, 17 lines)
@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest Features
title: ''
labels: enhancement
assignees: ''

---

## Issue
A clear and concise description of what the problem/feature is.

## Solution
* Provide steps or ideas on how to implement or investigate this new feature.

## References
* Provide additional context and external references here.
.github/pull_request_template.md (new file, 17 lines)
@@ -0,0 +1,17 @@
## Steps for Creating a Pull Request
* Assign yourself as the **Assignee**
* Allow one of the Code Owners/Maintainers to review the changes proposed by the Pull Request.
* Provide appropriate labels as necessary

> [!TIP]
> `enhancement` for new features, `documentation` for modifications to the docs, `performance` for performance-related changes, and so on.

* Provide a description of the work that has been done.
* It is nice to know what was added, removed, or modified, with screenshots of those changes.

> [!TIP]
> You can have them under **Added**, **Removed**, **Updates**, and **Screenshots** if any (**Changes** could also be used).

## After the Pull Request is Opened
* Once the Pull Request has been created, please add any Issue(s) that are being addressed by this change (if any).
* If the reviewer(s) mention any changes or open threads for questions, please resolve those as soon as you can.
.github/workflows/build.yml (17 lines changed)
@@ -4,6 +4,15 @@ on:
pull_request:
branches:
- master
paths:
- '/'
- '!docs/**'
- '!imgs/**'
- '!.github/**'
- '.github/workflows/**'
- '!.gitignore'
- '!LICENSE'
- '!README'

jobs:
Discord-Node-Build: # test if the node install and run
@@ -13,10 +22,10 @@ jobs:
- name: Checkout Repository
uses: actions/checkout@v4

- name: Set up Node Environment v18.18.2
- name: Set up Node Environment lts/hydrogen
uses: actions/setup-node@v4
with:
node-version: 18.18.2
node-version: lts/hydrogen
cache: "npm"

- name: Install Project Dependencies
@@ -50,10 +59,10 @@ jobs:
- name: Checkout Repository
uses: actions/checkout@v4

- name: Set up Node Environment v18.18.2
- name: Set up Node Environment lts/hydrogen
uses: actions/setup-node@v4
with:
node-version: 18.18.2
node-version: lts/hydrogen
cache: "npm"

- name: Create Environment Variables
.github/workflows/test.yml (21 lines changed)
@@ -1,9 +1,18 @@
name: Tests
run-name: Test source code for errors
run-name: Unit Tests
on:
push:
pull_request:
branches:
- master
paths:
- '/'
- '!docs/**'
- '!imgs/**'
- '!.github/**'
- '.github/workflows/**'
- '!.gitignore'
- '!LICENSE'
- '!README'

jobs:
Discord-Node-Test:
@@ -13,10 +22,10 @@ jobs:
- name: Checkout Repository
uses: actions/checkout@v4

- name: Set up Node Environment v18.18.2
- name: Set up Node Environment lts/hydrogen
uses: actions/setup-node@v4
with:
node-version: 18.18.2
node-version: lts/hydrogen
cache: "npm"

- name: Install Project Dependencies
@@ -44,10 +53,10 @@
- name: Checkout Repository
uses: actions/checkout@v4

- name: Set up Node Environment v18.18.2
- name: Set up Node Environment lts/hydrogen
uses: actions/setup-node@v4
with:
node-version: 18.18.2
node-version: lts/hydrogen
cache: "npm"

- name: Create Environment Variables
@@ -1,5 +1,5 @@
# use node LTS image for version 18
FROM node:18.18.2
FROM node:hydrogen-alpine

# set working directory inside container
WORKDIR /app
README.md (26 lines changed)
@@ -13,13 +13,18 @@ Ollama is an AI model management tool that allows users to install and use custo
The project aims to:
* [x] Create a Discord bot that will utilize Ollama to chat with users!
* [ ] User Preferences on Chat
* [ ] Message Persistence on Channels and Threads
* [x] Message Persistence on Channels and Threads
* [x] Threads
* [x] Channels
* [x] Containerization with Docker
* [x] Slash Commands Compatible
* [x] Generated Token Length Handling for >2000 ~~or >6000 characters~~
* [x] Generated Token Length Handling for >2000
* [x] Token Length Handling of any message size
* [ ] External WebUI Integration
* [ ] User vs. Server Preferences
* [ ] Redis Caching
* [x] Administrator Role Compatible
* [ ] Multi-User Chat Generation (Multiple users chatting at the same time)
* [ ] Automatic and Manual model pulling through the Discord client
* [ ] Allow others to create their own models personalized for their own servers!
* [ ] Documentation on creating your own LLM
* [ ] Documentation on web scraping and cleaning
@@ -28,20 +33,25 @@ The project aims to:
* Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo.
* You will need a `.env` file in the root of the project directory with the bot's token. A `.env.sample` is provided as a reference for the required environment variables.
* For example, `CLIENT_TOKEN = [Bot Token]`
* Please refer to the docs for bot setup. **NOTE**: These guides assume you already know how to set up a bot account for discord.
* Please refer to the docs for bot setup.
* [Local Machine Setup](./docs/setup-local.md)
* [Docker Setup for Servers and Local Machines](./docs/setup-docker.md)
* Nvidia is recommended for now, but support for other GPUs should be in development.
* Local use is not recommended.
* [Creating a Discord App](./docs/setup-discord-app.md)

## Resources
* [NodeJS](https://nodejs.org/en)
* This project uses `v20.10.0+` (npm `10.2.5`). Consider using [nvm](https://github.com/nvm-sh/nvm) for multiple NodeJS versions.
* To run dev in `ts-node`, using `v18.18.2` is recommended. **CAUTION**: `v18.19.0` or `lts/hydrogen` will not run properly.
* This project runs on `lts/hydrogen`.
* To run dev in `ts-node`/`nodemon`, using `v18.18.2` is recommended.
* To run dev with `tsx`, you can use `v20.10.0` or earlier.
* This project supports any NodeJS version above `16.x.x` to only allow ESModules.
* [Ollama](https://ollama.ai/)
* [Ollama](https://ollama.com/)
* [Ollama Docker Image](https://hub.docker.com/r/ollama/ollama)
* **IMPORTANT**: For Nvidia GPU setup, **install** `nvidia container toolkit/runtime` then **configure** it with Docker to utilize the Nvidia driver.

> [!CAUTION]
> `v18.X.X` or `lts/hydrogen` will not run properly for `npm run dev-mon`. It is recommended to just use `npm run dev-tsx` for development. The nodemon version will likely be removed in a future update.

* [Discord Developer Portal](https://discord.com/developers/docs/intro)
* [Discord.js Docs](https://discord.js.org/docs/packages/discord.js/main)
* [Setting up Docker (Ubuntu 20.04)](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04)
@@ -8,7 +8,7 @@ services:
build: ./ # find docker file in designated path
container_name: discord
restart: always # rebuild container always
image: discord/bot:0.5.0
image: discord/bot:0.5.2
environment:
CLIENT_TOKEN: ${CLIENT_TOKEN}
GUILD_ID: ${GUILD_ID}
docs/setup-discord-app.md (new file, 47 lines)
@@ -0,0 +1,47 @@
## Discord App/Bot Setup
* Refer to the [Discord Developers](https://discord.com/build/app-developers) tab on their site.
* Click on **Getting Started** and it may prompt you to log in. Do that.
* You should see this upon logging in.

![discord-dev-getting-started](../imgs/tutorial/discord-dev.png)

* Click on **Create App**; you should now be prompted to create an App with a name. If you are a part of a team, you may choose to create it for your team or for yourself.

![create-app](../imgs/tutorial/create-app.png)

* Great! Now you should have your App created. It should bring you to a page like this.

![created-app](../imgs/tutorial/created-app.png)

* From here, you will need your App's token; navigate to the **Bot** tab and click **Reset Token** to generate a new token to interact with your bot.
* The following app will not exist; usage of this token would be pointless as this is a guide.

![bot-token](../imgs/tutorial/token.png)

* You will also need your App's **Client ID**; navigate to **OAuth2** and copy your id.

![client-id](../imgs/tutorial/client-id.png)

* That should be all of the environment variables needed from Discord. Now we need this app on your server.
* Navigate to **Installation** and copy the provided **Install Link** to add your App to your server.
* You should set the **Guild Install** permissions as you like; for this purpose we will allow admin privileges for now. Ensure the **bot** scope is added to do this.

![install-link](../imgs/tutorial/invite.png)
![scope-permissions](../imgs/tutorial/scope.png)

* Notice that your App's **Client Id** is a part of the **Install Link**.
* Paste this link in a web browser and you should see something like this.

![server-invite](../imgs/tutorial/server-invite-1.png)

* Click **Add to Server** and you should see this.

![server-invite-permissions](../imgs/tutorial/server-invite-2-auth.png)

* Choose a server to add the App to, then click **Continue** then **Authorize**. You should see this after that.

![bot-server-auth](../imgs/tutorial/server-invite-3.png)

* Congratulations! You should now see your App on your server!

![server-shows-app](../imgs/tutorial/bot-in-server.png)
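Once the token from this guide is in your `.env`, a minimal login looks roughly like the sketch below. This is not the repository's actual entry point, only a hedged illustration of how `CLIENT_TOKEN` is consumed with discord.js (a project dependency); the intents listed are assumptions.

```ts
import { Client, GatewayIntentBits } from 'discord.js'
import 'dotenv/config'

// Minimal sketch: these intents are assumptions, not the repo's exact configuration
const client = new Client({
    intents: [
        GatewayIntentBits.Guilds,
        GatewayIntentBits.GuildMessages,
        GatewayIntentBits.MessageContent
    ]
})

client.once('ready', () => console.log(`Logged in as ${client.user?.tag}`))
client.login(process.env.CLIENT_TOKEN)   // token from the Bot tab above
```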
@@ -47,6 +47,7 @@ sudo systemctl restart docker
* `DISCORD_IP = 172.18.0.3`
* `SUBNET_ADDRESS = 172.18.0.0`
* Don't understand any of this? Watch a networking video to understand subnetting.
* You also need all environment variables shown in [`.env.sample`](../.env.sample)
* You will need a model in the container for this to work properly. On Docker Desktop, go to the `Containers` tab, select the `ollama` container, and select `Exec` to run as root on your container. Now, run `ollama pull [model name]` to get your model.
* For Linux servers, you need another shell to pull the model, or, if you run `docker compose build && docker compose up -d`, it will run in the background and keep your shell free. Run `docker exec -it ollama bash` to get into the container and run the same pull command above.
* Otherwise, there is no need to install any npm packages for this, you just need to run `npm run start` to pull the containers and spin them up.
@@ -54,6 +55,7 @@ sudo systemctl restart docker
* `docker compose stop`
* `docker compose rm`
* `docker ps` to check if containers have been removed.
* This may not work if the nvidia installation was done incorrectly. If this is the case, please utilize the [Manual "Clean-up"](#manual-run-with-docker) shown below.
* You can also use `npm run clean` to clean up the containers and remove the network to address a possible `Address already in use` problem.

## Manual Run (with Docker)
@@ -1,19 +1,24 @@
## Ollama Setup
* Go to Ollama's [Linux download page](https://ollama.ai/download/linux) and run the simple curl command they provide. The command should be `curl https://ollama.ai/install.sh | sh`.
* Now run the following commands in separate terminals to test out how it works!
* Since Ollama will run as a systemd service, there is no need to run `ollama serve` unless you disable it. If you do disable it or have an older `ollama` version, do the following:
* In terminal 1 -> `ollama serve` to set up ollama
* In terminal 2 -> `ollama run [model name]`, for example `ollama run llama2`
* The models can vary as you can create your own model. You can also view ollama's [library](https://ollama.ai/library) of models.
* If there are any issues running ollama because of missing LLMs, run `ollama pull [model name]` as it will pull the model if Ollama has it in their library.
* Otherwise, if you have the latest `ollama`, you can just run `ollama run [model name]` rather than running this in 2 terminals.
* If there are any issues running ollama because of missing LLMs, run `ollama pull [model name]` as it will pull the model if Ollama has it in their library.
* This can also be done in [wsl](https://learn.microsoft.com/en-us/windows/wsl/install) for Windows machines.
* This should also not be a problem once a future feature allows pulling models via the Discord client. For now, they must be pulled manually.
* You can now interact with the model you just ran (it might take a second to start up).
* Response time varies with processing power!

## To Run Locally (without Docker)
* Run `npm install` to install the npm packages.
* Ensure that your [.env](../.env.sample) file's `OLLAMA_IP` is `127.0.0.1` to work properly.
* You only need your `CLIENT_TOKEN`, `GUILD_ID`, `MODEL`, `CLIENT_UID`, `OLLAMA_IP`, `OLLAMA_PORT`.
* The ollama ip and port should just use its defaults by nature. If not, utilize `OLLAMA_IP = 127.0.0.1` and `OLLAMA_PORT = 11434`.
* Now, you can run the bot by running `npm run client`, which will build the TypeScript, run the compiled output, and run the setup for ollama.
* **IMPORTANT**: This must be run in the wsl/Linux instance to work properly! Using Command Prompt/Powershell/Git Bash/etc. will not work on Windows (at least in my experience).
* Refer to the [resources](../README.md#resources) on what node version to use.
* Open up a separate terminal/shell (you will need wsl for this if on Windows) and run `ollama serve` to start up ollama.
* If you are using wsl, open up a separate terminal/shell to start up the ollama service. Again, if you are running an older ollama, you must run `ollama serve` in that shell.
* If you are on an actual Linux machine/VM there is no need for another terminal (unless you have an older ollama version).
* If you do not have a model, you will need to run `ollama pull [model name]` in a separate terminal to get it.
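To sanity-check the local Ollama endpoint before starting the bot, a short script against the `ollama` npm package (already a project dependency) can be used. This is a sketch, not part of the repository; it assumes a model such as `llama2` has been pulled and that Ollama is listening on the default `127.0.0.1:11434` mentioned above.

```ts
import { Ollama } from 'ollama'

// Defaults from the doc above; adjust host/model to match your .env and pulled models
const ollama = new Ollama({ host: 'http://127.0.0.1:11434' })

const reply = await ollama.chat({
    model: 'llama2',   // assumption: any model you have pulled works here
    messages: [{ role: 'user', content: 'Say hello in one short sentence.' }]
})

console.log(reply.message.content)
```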
imgs/tutorial/bot-in-server.png (new binary file, 7.2 KiB)
imgs/tutorial/client-id.png (new binary file, 142 KiB)
imgs/tutorial/create-app.png (new binary file, 21 KiB)
imgs/tutorial/created-app.png (new binary file, 139 KiB)
imgs/tutorial/discord-dev.png (new binary file, 98 KiB)
imgs/tutorial/invite.png (new binary file, 147 KiB)
imgs/tutorial/scope.png (new binary file, 119 KiB)
imgs/tutorial/server-invite-1.png (new binary file, 139 KiB)
imgs/tutorial/server-invite-2-auth.png (new binary file, 196 KiB)
imgs/tutorial/server-invite-3.png (new binary file, 134 KiB)
imgs/tutorial/token.png (new binary file, 141 KiB)
package-lock.json (generated, 1256 lines changed)
package.json (18 lines changed)
@@ -1,6 +1,6 @@
{
"name": "discord-ollama",
"version": "0.5.0",
"version": "0.5.2",
"description": "Ollama Integration into discord",
"main": "build/index.js",
"exports": "./build/index.js",
@@ -16,7 +16,7 @@
"start": "docker compose build --no-cache && docker compose up -d",
"docker:start": "npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama",
"docker:start-cpu": "npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama-cpu",
"docker:clean": "docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"docker:clean": "docker rm -f discord && docker rm -f ollama && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"docker:network": "docker network create --subnet=172.18.0.0/16 ollama-net",
"docker:build": "docker build --no-cache -t discord/bot:$(node -p \"require('./package.json').version\") .",
"docker:test": "docker run -d --rm -v discord:/src/app --name test discord/bot:$(node -p \"require('./package.json').version\") npm run test:run",
@@ -27,17 +27,17 @@
"author": "Kevin Dang",
"license": "ISC",
"dependencies": {
"discord.js": "^14.14.1",
"dotenv": "^16.3.1",
"ollama": "^0.5.0"
"discord.js": "^14.15.3",
"dotenv": "^16.4.5",
"ollama": "^0.5.2"
},
"devDependencies": {
"@types/node": "^20.10.5",
"@types/node": "^20.14.2",
"@vitest/coverage-v8": "^1.6.0",
"nodemon": "^3.0.2",
"nodemon": "^3.1.3",
"ts-node": "^10.9.2",
"tsx": "^4.6.2",
"typescript": "^5.3.3",
"tsx": "^4.15.5",
"typescript": "^5.4.5",
"vitest": "^1.6.0"
},
"type": "module",
@@ -20,7 +20,7 @@ export const Capacity: SlashCommand = {
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || channel.type !== ChannelType.PublicThread) return
if (!channel || channel.type !== (ChannelType.PublicThread && ChannelType.GuildText)) return

// set state of bot chat features
openConfig('config.json', interaction.commandName, interaction.options.get('context-capacity')?.value)
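A note on the guard above and its repeats later in this diff: in TypeScript, `ChannelType.PublicThread && ChannelType.GuildText` evaluates to a single enum value (here `ChannelType.GuildText`, since `PublicThread` is truthy), so the comparison only ever tests against plain text channels. If the intent is to allow both channel types, a membership check along these lines would do it; this is a sketch, not the repository's code.

```ts
import { ChannelType } from 'discord.js'

// Sketch: admit both public threads and regular text channels explicitly
const allowedTypes = [ChannelType.GuildText, ChannelType.PublicThread]
const isAllowed = (type: ChannelType): boolean => allowedTypes.includes(type)

console.log(isAllowed(ChannelType.PublicThread))   // true
console.log(isAllowed(ChannelType.DM))             // false
```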
src/commands/channelToggle.ts (new file, 34 lines)
@@ -0,0 +1,34 @@
import { ApplicationCommandOptionType, ChannelType, Client, CommandInteraction } from 'discord.js'
import { SlashCommand } from '../utils/commands.js'
import { openConfig } from '../utils/jsonHandler.js'

export const ChannelToggle: SlashCommand = {
name: 'channel-toggle',
description: 'toggles channel or thread usage.',

// set user option for toggling
options: [
{
name: 'toggle-channel',
description: 'toggle channel usage, otherwise threads',
type: ApplicationCommandOptionType.Boolean,
required: true
}
],

// Query for chatting preference
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel location
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || channel.type !== (ChannelType.PublicThread && ChannelType.GuildText)) return

// set state of bot channel preferences
openConfig('config.json', interaction.commandName, interaction.options.get('toggle-channel')?.value)

interaction.reply({
content: `Channel Preferences have for Regular Channels set to \`${interaction.options.get('toggle-channel')?.value}\``,
ephemeral: true
})
}
}
@@ -6,6 +6,7 @@ import { Disable } from './disable.js'
import { Shutoff } from './shutoff.js'
import { Capacity } from './capacity.js'
import { PrivateThreadCreate } from './threadPrivateCreate.js'
import { ChannelToggle } from './channelToggle.js'

export default [
ThreadCreate,
@@ -14,5 +15,6 @@ export default [
MessageStream,
Disable,
Shutoff,
Capacity
Capacity,
ChannelToggle
] as SlashCommand[]
@@ -20,13 +20,13 @@ export const MessageStream: SlashCommand = {
run: async (client: Client, interaction: CommandInteraction) => {
// verify channel
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || channel.type !== ChannelType.PublicThread) return
if (!channel || channel.type !== (ChannelType.PublicThread && ChannelType.GuildText)) return

// save value to json and write to it
openConfig('config.json', interaction.commandName, interaction.options.get('stream')?.value)

interaction.reply({
content: `Message streaming preferences for embed set to: \`${interaction.options.get('stream')?.value}\``,
content: `Message streaming preferences set to: \`${interaction.options.get('stream')?.value}\``,
ephemeral: true
})
}
@@ -20,7 +20,7 @@ export const MessageStyle: SlashCommand = {
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || channel.type !== ChannelType.PublicThread) return
if (!channel || channel.type !== (ChannelType.PublicThread && ChannelType.GuildText)) return

// set the message style
openConfig('config.json', interaction.commandName, interaction.options.get('embed')?.value)
@@ -1,7 +1,7 @@
import { embedMessage, event, Events, normalMessage, UserMessage } from '../utils/index.js'
import { Configuration, getConfig, getThread, openConfig, openThreadInfo } from '../utils/jsonHandler.js'
import { Configuration, getChannelInfo, getConfig, getThread, openChannelInfo, openConfig, openThreadInfo } from '../utils/jsonHandler.js'
import { clean } from '../utils/mentionClean.js'
import { ThreadChannel } from 'discord.js'
import { TextChannel, ThreadChannel } from 'discord.js'

/**
* Max Message length for free users is 2000 characters (bot or not).
@@ -10,17 +10,6 @@ import { ThreadChannel } from 'discord.js'
export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama, client }, message) => {
log(`Message \"${clean(message.content)}\" from ${message.author.tag} in channel/thread ${message.channelId}.`)

// need new check for "open/active" threads here!
const threadMessages: UserMessage[] = await new Promise((resolve) => {
// set new queue to modify
getThread(`${message.channelId}.json`, (threadInfo) => {
if (threadInfo?.messages)
resolve(threadInfo.messages)
else
log(`Channel/Thread ${message.channelId} does not exist.`)
})
})

// Do not respond if bot talks in the chat
if (message.author.tag === message.client.user.tag) return

@@ -45,6 +34,14 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
return
}

// ensure channel json exists, if not create it
if (config.options['channel-toggle']) {
openChannelInfo(message.channelId,
message.channel as TextChannel,
message.author.tag
)
}

// check if there is a set capacity in config
if (typeof config.options['modify-capacity'] !== 'number')
log(`Capacity is undefined, using default capacity of ${msgHist.capacity}.`)
@@ -62,11 +59,31 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
})
})

// need new check for "open/active" threads/channels here!
const chatMessages: UserMessage[] = await new Promise((resolve) => {
// set new queue to modify
if (config.options['channel-toggle']) {
getChannelInfo(`${message.channelId}-${message.author.tag}.json`, (channelInfo) => {
if (channelInfo?.messages)
resolve(channelInfo.messages)
else
log(`Channel ${message.channel}-${message.author.tag} does not exist.`)
})
} else {
getThread(`${message.channelId}.json`, (threadInfo) => {
if (threadInfo?.messages)
resolve(threadInfo.messages)
else
log(`Thread ${message.channelId} does not exist.`)
})
}
})

// response string for ollama to put its response
let response: string

// set up new queue
msgHist.setQueue(threadMessages)
msgHist.setQueue(chatMessages)

// check if we can push, if not, remove oldest
while (msgHist.size() >= msgHist.capacity) msgHist.dequeue()
@@ -96,10 +113,18 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
})

// only update the json on success
openThreadInfo(`${message.channelId}.json`,
client.channels.fetch(message.channelId) as unknown as ThreadChannel,
msgHist.getItems()
)
if (config.options['channel-toggle']) {
openChannelInfo(message.channelId,
message.channel as TextChannel,
message.author.tag,
msgHist.getItems()
)
} else {
openThreadInfo(`${message.channelId}.json`,
client.channels.fetch(message.channelId) as unknown as ThreadChannel,
msgHist.getItems()
)
}
} catch (error: any) {
msgHist.pop() // remove message because of failure
openConfig('config.json', 'message-style', false)
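For readers following the branching above: with `channel-toggle` enabled, history is kept per user per channel, while threads keep a single shared file. A hypothetical helper expressing the file naming implied by this hunk (the repository does not export such a function) might look like this.

```ts
// Hypothetical helper: mirrors the names used above, which jsonHandler prefixes with data/
// threads  -> data/<channelId>.json
// channels -> data/<channelId>-<userTag>.json (one history per user)
function historyFileName(channelId: string, userTag: string, channelToggle: boolean): string {
    return channelToggle ? `${channelId}-${userTag}.json` : `${channelId}.json`
}

console.log(historyFileName('1234567890', 'someuser', true))    // "1234567890-someuser.json"
console.log(historyFileName('1234567890', 'someuser', false))   // "1234567890.json"
```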
@@ -1,4 +1,4 @@
import { ThreadChannel } from 'discord.js'
import { TextChannel, ThreadChannel } from 'discord.js'
import { UserMessage } from './events.js'
import fs from 'fs'
import path from 'path'
@@ -9,7 +9,8 @@ export interface Configuration {
'message-stream'?: boolean,
'message-style'?: boolean,
'toggle-chat'?: boolean,
'modify-capacity'?: number
'modify-capacity'?: number,
'channel-toggle'?: boolean
}
}

@@ -19,6 +20,13 @@ export interface Thread {
messages: UserMessage[]
}

export interface Channel {
readonly id: string
readonly name: string
readonly user: string
messages: UserMessage[]
}

/**
* Method to open a file in the working directory and modify/create it
*
@@ -85,7 +93,7 @@ export function openThreadInfo(filename: string, thread: ThreadChannel, messages
if (fs.existsSync(fullFileName)) {
fs.readFile(fullFileName, 'utf8', (error, data) => {
if (error)
console.log(`[Error: openConfig] Incorrect file format`)
console.log(`[Error: openThreadInfo] Incorrect file format`)
else {
const object = JSON.parse(data)
object['messages'] = messages as []
@@ -125,4 +133,74 @@ export async function getThread(filename: string, callback: (config: Thread | un
} else {
callback(undefined) // file not found
}
}

/**
* Method to open the channel history
*
* @param filename name of the json file for the channel by user
* @param channel the text channel info
* @param user the user's name
* @param messages their messages
*/
export async function openChannelInfo(filename: string, channel: TextChannel, user: string, messages: UserMessage[] = []): Promise<void> {
// thread exist handler
const isThread: boolean = await new Promise((resolve) => {
getThread(`${channel.id}.json`, (threadInfo) => {
if (threadInfo?.messages)
resolve(true)
else
resolve(false)
})
})

// This is an existing thread, don't create another json
if (isThread) return

const fullFileName = `data/${filename}-${user}.json`
if (fs.existsSync(fullFileName)) {
fs.readFile(fullFileName, 'utf8', (error, data) => {
if (error)
console.log(`[Error: openChannelInfo] Incorrect file format`)
else {
const object = JSON.parse(data)
if (object['messages'].length === 0)
object['messages'] = messages as []
else if (object['messages'].length !== 0 && messages.length !== 0)
object['messages'] = messages as []
fs.writeFileSync(fullFileName, JSON.stringify(object, null, 2))
}
})
} else { // file doesn't exist, create it
const object: Configuration = JSON.parse(`{ \"id\": \"${channel?.id}\", \"name\": \"${channel?.name}\", \"user\": \"${user}\", \"messages\": []}`)

const directory = path.dirname(fullFileName)
if (!fs.existsSync(directory))
fs.mkdirSync(directory, { recursive: true })

// only creating it, no need to add anything
fs.writeFileSync(fullFileName, JSON.stringify(object, null, 2))
console.log(`[Util: openChannelInfo] Created '${fullFileName}' in working directory`)
}
}

/**
* Method to get the channel information/history
*
* @param filename name of the json file for the channel by user
* @param callback function to handle resolving message history
*/
export async function getChannelInfo(filename: string, callback: (config: Channel | undefined) => void): Promise<void> {
const fullFileName = `data/${filename}`
if (fs.existsSync(fullFileName)) {
fs.readFile(fullFileName, 'utf8', (error, data) => {
if (error) {
callback(undefined)
return // something went wrong... stop
}
callback(JSON.parse(data))
})
} else {
callback(undefined) // file not found
}
}
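Since `getChannelInfo` (like `getThread`) is callback-based, callers wrap it in a promise, as the message handler earlier in this diff does. A small usage sketch follows; the import path and the file name are assumptions for illustration.

```ts
import { getChannelInfo, type Channel } from './utils/jsonHandler.js'   // path assumed

// Wrap the callback API in a promise, the same way messageCreate consumes it
const history = await new Promise<Channel | undefined>((resolve) => {
    getChannelInfo('1234567890-someuser.json', resolve)   // hypothetical <channelId>-<userTag>.json
})

console.log(history?.messages.length ?? 'no channel history file yet')
```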
@@ -2,6 +2,7 @@ import { EmbedBuilder, Message } from 'discord.js'
import { ChatResponse, Ollama } from 'ollama'
import { ChatParams, UserMessage, streamResponse, blockResponse } from './index.js'
import { Queue } from '../queues/queue.js'
import { AbortableAsyncIterator } from 'ollama/src/utils.js'

/**
* Method to send replies as normal text on discord like any other user
@@ -19,7 +20,7 @@ export async function embedMessage(
stream: boolean
): Promise<string> {
// bot response
let response: ChatResponse | AsyncGenerator<ChatResponse, any, unknown>
let response: ChatResponse | AbortableAsyncIterator<ChatResponse>
let result: string = ''

// initial message to client
@@ -2,6 +2,7 @@ import { Message } from 'discord.js'
import { ChatResponse, Ollama } from 'ollama'
import { ChatParams, UserMessage, streamResponse, blockResponse } from './index.js'
import { Queue } from '../queues/queue.js'
import { AbortableAsyncIterator } from 'ollama/src/utils.js'

/**
* Method to send replies as normal text on discord like any other user
@@ -19,7 +20,7 @@ export async function normalMessage(
stream: boolean
): Promise<string> {
// bot's response
let response: ChatResponse | AsyncGenerator<ChatResponse, any, unknown>
let response: ChatResponse | AbortableAsyncIterator<ChatResponse>
let result: string = ''

await message.channel.send('Generating Response . . .').then(async sentMessage => {
@@ -32,19 +33,23 @@ export async function normalMessage(

// run query based on stream preference, true = stream, false = block
if (stream) {
response = await streamResponse(params)
let messageBlock: Message = sentMessage
response = await streamResponse(params) // THIS WILL BE SLOW due to discord limits!
for await (const portion of response) {
// append token to message
result += portion.message.content
// check if over discord message limit
if (result.length + portion.message.content.length > 2000) {
result = portion.message.content

// exceeds handled length
if (result.length > 2000) {
message.channel.send(`Response length ${result.length} has exceeded Discord maximum.\n\nLong Stream messages not supported.`)
break // stop stream
}

// resend current output, THIS WILL BE SLOW due to discord limits!
sentMessage.edit(result || 'No Content Yet...')
// new message block, wait for it to send and assign new block to respond.
await message.channel.send("Creating new stream block...").then(sentMessage => { messageBlock = sentMessage })
} else {
result += portion.message.content

// ensure block is not empty
if (result.length > 5)
messageBlock.edit(result)
}
console.log(result)
}
}
else {
@@ -1,23 +1,23 @@
import { ChatResponse } from "ollama"
import { ChatParams } from "./index.js"
import { AbortableAsyncIterator } from "ollama/src/utils.js"

/**
* Method to query the Ollama client for async generation
* @param params
* @returns Asyn
*/
export async function streamResponse(params: ChatParams): Promise<AsyncGenerator<ChatResponse, any, unknown>> {
export async function streamResponse(params: ChatParams): Promise<AbortableAsyncIterator<ChatResponse>> {
return await params.ollama.chat({
model: params.model,
messages: params.msgHist,
options: {
num_thread: 8, // remove if optimization needed further
mirostat: 1,
mirostat_tau: 2.0,
top_k: 70
},
stream: true
})
}) as unknown as AbortableAsyncIterator<ChatResponse>
}

/**
@@ -30,7 +30,6 @@ export async function blockResponse(params: ChatParams): Promise<ChatResponse> {
model: params.model,
messages: params.msgHist,
options: {
num_thread: 8, // remove if optimization needed further
mirostat: 1,
mirostat_tau: 2.0,
top_k: 70
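Consuming the updated `streamResponse` follows the same `for await` pattern shown in the `normalMessage` hunk above. A standalone sketch; the import path, model name, and message shape are assumptions, and the `ChatParams` fields (`ollama`, `model`, `msgHist`) are taken from their usage in this diff.

```ts
import { Ollama } from 'ollama'
import { streamResponse } from './utils/index.js'   // assumed export path

const ollama = new Ollama({ host: 'http://127.0.0.1:11434' })

// Sketch: stream a reply token by token and accumulate it into one string
const stream = await streamResponse({
    ollama,
    model: 'llama2',   // assumption: any locally pulled model
    msgHist: [{ role: 'user', content: 'Hello!' }]
})

let text = ''
for await (const portion of stream) {
    text += portion.message.content   // tokens arrive incrementally
}
console.log(text)
```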
@@ -22,6 +22,6 @@ describe('#commands', () => {
// test specific commands in the object
it('references specific commands', () => {
const commandsString = commands.map(e => e.name).join(', ')
expect(commandsString).toBe('thread, private-thread, message-style, message-stream, toggle-chat, shutoff, modify-capacity')
expect(commandsString).toBe('thread, private-thread, message-style, message-stream, toggle-chat, shutoff, modify-capacity, channel-toggle')
})
})
@@ -2,8 +2,8 @@
"compilerOptions": {
// Dependent on node version
"target": "ES2020",
"module": "Node16",
"moduleResolution": "Node16",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"strict": true,
// We must set the type
"noImplicitAny": true,
@@ -13,11 +13,17 @@
"strictNullChecks": true,
// We can import json files like JavaScript
"resolveJsonModule": true,
// Decompile .ts to .js into a folder named dist
"skipLibCheck": true,
"esModuleInterop": true,
// Decompile .ts to .js into a folder named build
"outDir": "build",
"rootDir": "src"
"rootDir": "src",
"baseUrl": ".",
"paths": {
"*": ["node_modules/"]
}
},
// environment for env vars
"include": ["src/**/*"],
"include": ["src/**/*.ts"],
"exclude": ["node_modules"]
}