Compare commits

...

5 Commits

Author SHA1 Message Date
Kevin Dang
5f8b513269 Workflows Fix (#32)
* fix: workflows missing new env
2024-04-01 00:51:07 -07:00
Kevin Dang
fcb0267559 Shutoff Bot Command (#30)
* add: disable chat command

* update: workflow name

* add: shutoff using admin env list

* update: sample env for admins

* fix: shutdown booleans

* update: version increment
2024-04-01 00:43:19 -07:00
Kevin Dang
6b903cff5e Auto-Generate Config (#29)
* fix: name in job

* add: auto create config.json on missing

* update: readme goals

* add: clarify instructions on fail chat

* update: reduced redundancy in package file
2024-03-30 22:02:49 -07:00
Kevin Dang
9320a7476e CI for Application Builds (#27) 2024-03-29 23:33:03 -07:00
Kevin Dang
1b70fc2787 Nvidia Container Toolkit Setup and Docs (#26)
* update: nvidia as runtime

* add: setup instructions for nvidia

* update: setup-docker.md
2024-03-28 12:01:33 -07:00
13 changed files with 238 additions and 19 deletions

View File

@@ -21,4 +21,7 @@ OLLAMA_PORT = PORT
DISCORD_IP = IP_ADDRESS
# subnet address, ex. 172.18.0.0 as we use /16.
SUBNET_ADDRESS = ADDRESS
# list of admins to handle admin commands for the bot, use single quotes
ADMINS=['username1', 'username2', 'username3', ...]
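The `ADMINS` value above is a single-quoted, array-style string rather than valid JSON, so the bot has to normalize the quotes before parsing it (the same approach appears in `src/commands/shutoff.ts` further down). A minimal sketch of that parsing, assuming the format shown in the sample env:

```ts
// Minimal sketch (not the committed implementation): turn the single-quoted
// ADMINS list from the .env file into a usable string array.
const raw = process.env.ADMINS ?? "['username1', 'username2']"
const admins: string[] = JSON.parse(raw.replace(/'/g, '"'))
console.log(admins.includes('username1')) // true when the user is listed
```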

.github/workflows/build-test.yml vendored Normal file (81 lines changed)
View File

@@ -0,0 +1,81 @@
name: Builds
run-name: Validate Node and Docker Builds
on:
push:
branches:
- master
jobs:
Discord-Node-Build: # test if the node install and run
runs-on: ubuntu-latest
timeout-minutes: 2
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Set up Node Environment v18.18.2
uses: actions/setup-node@v4
with:
node-version: 18.18.2
cache: 'npm'
- name: Install Project Dependencies
run: |
npm install
- name: Build Application
run: |
npm run build
- name: Create Environment Variables
run: |
touch .env
echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
echo GUILD_ID = ${{ secrets.GUILD_ID }} >> .env
echo CHANNEL_ID = ${{ secrets.CHANNEL_ID }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo CLIENT_UID = ${{ secrets.CLIENT_UID }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
- name: Startup Discord Bot Client
run: |
nohup npm run prod &
Discord-Ollama-Container-Build: # test docker build and run
runs-on: ubuntu-latest
timeout-minutes: 2
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Set up Node Environment v18.18.2
uses: actions/setup-node@v4
with:
node-version: 18.18.2
cache: 'npm'
- name: Create Environment Variables
run: |
touch .env
echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
echo GUILD_ID = ${{ secrets.GUILD_ID }} >> .env
echo CHANNEL_ID = ${{ secrets.CHANNEL_ID }} >> .env
echo MODEL = ${{ secrets.MODEL }} >> .env
echo CLIENT_UID = ${{ secrets.CLIENT_UID }} >> .env
echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
echo ADMINS = ${{ secrets.ADMINS }} >> .env
- name: Setup Docker Network and Images
run: |
npm run docker:start-cpu
- name: Check Images Exist
run: |
(docker images | grep -q 'discord/bot' && docker images | grep -qE 'ollama/ollama') || exit 1
- name: Check Containers Exist
run: |
(docker ps | grep -q 'ollama' && docker ps | grep -q 'discord') || exit 1

View File

@@ -1,5 +1,5 @@
# Discord Ollama Integration [![License: CC BY-NC 4.0](https://img.shields.io/badge/License-CC_BY--NC_4.0-darkgreen.svg)](https://creativecommons.org/licenses/by-nc/4.0/) [![Release Badge](https://img.shields.io/github/v/release/kevinthedang/discord-ollama?logo=github)](https://github.com/kevinthedang/discord-ollama/releases/latest)
Ollama is an AI model management tool that allows users to install and use custom large language models locally. The goal is to create a discord bot that will utilize Ollama and chat with it on a Discord!
Ollama is an AI model management tool that allows users to install and use custom large language models locally. The goal is to create a Discord bot that will utilize Ollama and chat with it on a Discord server! Also, it will allow others to create their own models personalized for their own servers!
## Environment Setup
* Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo.

View File

@@ -8,7 +8,7 @@ services:
build: ./ # find docker file in designated path
container_name: discord
restart: always # rebuild container always
image: discord/bot:0.2.0
image: discord/bot:0.3.4
environment:
CLIENT_TOKEN: ${CLIENT_TOKEN}
GUILD_ID: ${GUILD_ID}
@@ -17,6 +17,7 @@ services:
CLIENT_UID: ${CLIENT_UID}
OLLAMA_IP: ${OLLAMA_IP}
OLLAMA_PORT: ${OLLAMA_PORT}
ADMINS: ${ADMINS}
networks:
ollama-net:
ipv4_address: ${DISCORD_IP}
@@ -32,9 +33,9 @@ services:
ollama-net:
ipv4_address: ${OLLAMA_IP}
# runtime: nvidia # use Nvidia Container Toolkit for GPU support
# devices:
# - /dev/nvidia0
runtime: nvidia # use Nvidia Container Toolkit for GPU support
devices:
- /dev/nvidia0
volumes:
- ollama:/root/.ollama
ports:

View File

@@ -2,6 +2,43 @@
* Follow this guide to setup [Docker](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04)
* If on Windows, download [Docker Desktop](https://docs.docker.com/desktop/install/windows-install/) to get the docker engine.
* Please also install [Docker Compose](https://docs.docker.com/compose/install/linux/) for easy running. If not, there are [scripts](#manual-run-with-docker) to set everything up.
* **IMPORTANT NOTE**: Currently, it seems like WSL does not play well with the Nvidia Container Toolkit. It will work initially and then reset for some odd reason. For now, it is advised to use an actual Linux machine when running with Docker. If you do not care about utilizing your GPU, or do not have an Nvidia GPU, disregard this.
## Nvidia Container Toolkit Setup
### Installation with Apt
* Instructions can be found [here](https://github.com/kevinthedang/discord-ollama/issues/23) in **Steps to reproduce** or below:
* Step 1. Configure the production repository on machine:
```sh
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
&& curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
```
* Step 2. Update the packages list from the repository:
```sh
sudo apt-get update
```
* Step 3. Install the Nvidia Container Toolkit:
```sh
sudo apt-get install -y nvidia-container-toolkit
```
### Configuring with Docker
Step 1. Configure the container runtime by using the `nvidia-ctk` command:
```sh
sudo nvidia-ctk runtime configure --runtime=docker
```
The `nvidia-ctk` command modifies the `/etc/docker/daemon.json` file on the host. The file is updated so that Docker can use the NVIDIA Container Runtime.
Step 2. Restart the Docker daemon:
```sh
sudo systemctl restart docker
```
### References for setup
* Guide to installing [Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
* [GitHub repository](https://github.com/NVIDIA/nvidia-container-toolkit?tab=readme-ov-file) for Nvidia Container Toolkit
## To Run (with Docker and Docker Compose)
* With the inclusion of subnets in the `docker-compose.yml`, you will need to set the `SUBNET_ADDRESS`, `OLLAMA_IP`, `OLLAMA_PORT`, and `DISCORD_IP`. Here are some default values if you don't care:

package-lock.json generated (4 lines changed)
View File

@@ -1,12 +1,12 @@
{
"name": "discord-ollama",
"version": "0.3.0",
"version": "0.3.4",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "discord-ollama",
"version": "0.3.0",
"version": "0.3.4",
"license": "ISC",
"dependencies": {
"axios": "^1.6.2",

View File

@@ -1,6 +1,6 @@
{
"name": "discord-ollama",
"version": "0.3.0",
"version": "0.3.4",
"description": "Ollama Integration into discord",
"main": "build/index.js",
"exports": "./build/index.js",
@@ -10,14 +10,16 @@
"build": "tsc",
"prod": "node .",
"client": "npm run build && npm run prod",
"clean": "docker compose down && docker rmi $(docker images | grep 0.2.0 | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"clean": "docker compose down && docker rmi $(docker images | grep $(node -p \"require('./package.json').version\") | tr -s ' ' | cut -d ' ' -f 3) && docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"start": "docker compose build --no-cache && docker compose up -d",
"docker:start": "npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama",
"docker:start-cpu": "npm run docker:network && npm run docker:build && npm run docker:client && npm run docker:ollama-cpu",
"docker:clean": "docker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)",
"docker:network": "docker network create --subnet=172.18.0.0/16 ollama-net",
"docker:build": "docker build --no-cache -t discord/bot:0.2.0 .",
"docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 discord",
"docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest"
"docker:build": "docker build --no-cache -t discord/bot:$(node -p \"require('./package.json').version\") .",
"docker:client": "docker run -d -v discord:/src/app --name discord --network ollama-net --ip 172.18.0.3 discord/bot:$(node -p \"require('./package.json').version\")",
"docker:ollama": "docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest",
"docker:ollama-cpu": "docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama --network ollama-net --ip 172.18.0.2 ollama/ollama:latest"
},
"author": "Kevin Dang",
"license": "ISC",

src/commands/disable.ts Normal file (33 lines changed)
View File

@@ -0,0 +1,33 @@
import { ChannelType, Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js'
import { SlashCommand } from '../utils/commands.js'
import { openFile } from '../utils/jsonHandler.js'
export const Disable: SlashCommand = {
name: 'toggle-chat',
description: 'toggle all chat features, slash commands will still work.',
// set available user options to pass to the command
options: [
{
name: 'enabled',
description: 'true = enabled, false = disabled',
type: ApplicationCommandOptionType.Boolean,
required: true
}
],
// Query for message information and set the style
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || channel.type !== ChannelType.GuildText) return
// set state of bot chat features
openFile('config.json', interaction.commandName, interaction.options.get('enabled')?.value)
interaction.reply({
content: `Chat features has been \`${interaction.options.get('enabled')?.value ? "enabled" : "disabled" }\``,
ephemeral: true
})
}
}

View File

@@ -2,9 +2,13 @@ import { SlashCommand } from '../utils/commands.js'
import { ThreadCreate } from './threadCreate.js'
import { MessageStyle } from './messageStyle.js'
import { MessageStream } from './messageStream.js'
import { Disable } from './disable.js'
import { Shutoff } from './shutoff.js'
export default [
ThreadCreate,
MessageStyle,
MessageStream
MessageStream,
Disable,
Shutoff
] as SlashCommand[]

src/commands/shutoff.ts Normal file (48 lines changed)
View File

@@ -0,0 +1,48 @@
import { ChannelType, Client, CommandInteraction, ApplicationCommandOptionType } from 'discord.js'
import { SlashCommand } from '../utils/commands.js'
import Keys from '../keys.js'
export const Shutoff: SlashCommand = {
name: 'shutoff',
description: 'shutdown the bot. You will need to manually bring it online again.',
// set available user options to pass to the command
options: [
{
name: 'are-you-sure',
description: 'true = yes, false = I\'m scared',
type: ApplicationCommandOptionType.Boolean,
required: true
}
],
// Query for message information and set the style
run: async (client: Client, interaction: CommandInteraction) => {
// fetch channel and message
const channel = await client.channels.fetch(interaction.channelId)
if (!channel || channel.type !== ChannelType.GuildText) return
// log this, this will probably be important for logging who did this
console.log(`User -> ${interaction.user.tag} attempting to shutdown ${client.user!!.tag}`)
// create list of superUsers based on string parse
const superUsers: string[] = JSON.parse(Keys.superUser.replace(/'/g, '"'))
// check if admin or false on shutdown
if (interaction.user.tag in superUsers || !(!interaction.options.get('are-you-sure')?.value && interaction.user.tag in superUsers)) {
interaction.reply({
content: `Shutdown failed:\n\n${interaction.user.tag}, You do not have permission to shutoff **${client.user?.tag}**, otherwise, you just didn't want to.`,
ephemeral: true
})
return // stop from shutting down
}
interaction.reply({
content: `${client.user?.tag} is ${interaction.options.get('are-you-sure')?.value ? "shutting down now." : "not shutting down." }`,
ephemeral: true
})
// clean up client instance and stop
client.destroy()
}
}
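The admin gate above parses `Keys.superUser` (the `ADMINS` env value) into a string array and then checks whether the invoking user is listed. For reference, membership in a string array is normally tested with `Array.prototype.includes` rather than the `in` operator, which tests object keys or array indices; a minimal sketch of such a check, assuming the same single-quoted `ADMINS` format:

```ts
// Sketch only, not the committed logic: parse the single-quoted admin list
// and check membership by value with includes().
const superUsers: string[] = JSON.parse("['user1', 'user2']".replace(/'/g, '"'))
const requester = 'user1'
if (!superUsers.includes(requester)) {
    console.log('Shutdown refused: requester is not an admin.')
} else {
    console.log('Requester is an admin; proceeding with shutdown.')
}
```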

View File

@@ -1,6 +1,6 @@
import { ChatResponse } from 'ollama'
import { embedMessage, event, Events, normalMessage } from '../utils/index.js'
import { Configuration, getConfig } from '../utils/jsonHandler.js'
import { Configuration, getConfig, openFile } from '../utils/jsonHandler.js'
/**
* Max Message length for free users is 2000 characters (bot or not).
@@ -28,8 +28,15 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
try {
const config: Configuration = await new Promise((resolve, reject) => {
getConfig('config.json', (config) => {
// check if config.json exists
if (config === undefined) {
reject(new Error('No Configuration is set up.'))
reject(new Error('No Configuration is set up.\n\nCreating \`config.json\` with \`message-style\` set as \`true\` for embedded messages.\nPlease try chatting again.'))
return
}
// check if chat is disabled
if(!config.options['toggle-chat']) {
reject(new Error('Admin(s) have disabled chat features.\n\n Please contact your server\'s admin(s).'))
return
}
resolve(config)
@@ -54,6 +61,7 @@ export default event(Events.MessageCreate, async ({ log, msgHist, tokens, ollama
})
} catch (error: any) {
msgHist.pop() // remove message because of failure
message.reply(`**Response generation failed.**\n\nReason: ${error.message}\n\nPlease use any config slash command.`)
openFile('config.json', 'message-style', true)
message.reply(`**Response generation failed.**\n\n**Reason:** *${error.message}*`)
}
})

View File

@@ -7,7 +7,8 @@ export const Keys = {
clientUid: getEnvVar('CLIENT_UID'),
guildId: getEnvVar('GUILD_ID'),
ipAddress: getEnvVar('OLLAMA_IP'),
portAddress: getEnvVar('OLLAMA_PORT')
portAddress: getEnvVar('OLLAMA_PORT'),
superUser: getEnvVar('ADMINS')
} as const // readonly keys
export default Keys
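`getEnvVar` is imported from elsewhere in the repo and is not part of this diff; a hypothetical sketch of what such a helper typically looks like (name and behavior assumed, not confirmed by this compare):

```ts
// Hypothetical helper, assumed behavior: read a required variable from
// process.env and fail fast with a clear error if it is missing.
function getEnvVar(name: string): string {
    const value = process.env[name]
    if (value === undefined || value === '')
        throw new Error(`Missing required environment variable: ${name}`)
    return value
}

// e.g. superUser: getEnvVar('ADMINS') as in keys.ts above
```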

View File

@@ -4,7 +4,8 @@ export interface Configuration {
readonly name: string
options: {
'message-stream'?: boolean,
'message-style'?: boolean
'message-style'?: boolean,
'toggle-chat'?: boolean
}
}
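For context, a config object that satisfies this extended interface could look like the following (values are hypothetical; only the field names come from the diff above):

```ts
// Interface repeated from src/utils/jsonHandler.ts so the sketch is self-contained.
interface Configuration {
    readonly name: string
    options: {
        'message-stream'?: boolean,
        'message-style'?: boolean,
        'toggle-chat'?: boolean
    }
}

// Hypothetical config.json contents after /message-style and /toggle-chat runs.
const sample: Configuration = {
    name: 'config.json',
    options: {
        'message-style': true,  // embedded (styled) replies
        'toggle-chat': true     // chat features enabled
    }
}

console.log(JSON.stringify(sample, null, 2))
```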