mirror of
https://github.com/kevinthedang/discord-ollama.git
synced 2025-12-12 11:56:06 -05:00
Compare commits
3 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1ccd1a012e | ||
|
|
68a5e097fe | ||
|
|
624ff2e5c8 |
1
.github/CONTRIBUTING.md
vendored
1
.github/CONTRIBUTING.md
vendored
@@ -11,6 +11,7 @@
|
||||
* features: `'feature/**'`
|
||||
* releases: `'releases/**'`
|
||||
* bugs: `'bug/**'`
|
||||
* docs: `'docs/**'`
|
||||
|
||||
## Run the Bot
|
||||
* Refer to all sections below before running the bot.
|
||||
|
||||
@@ -31,6 +31,11 @@ The project aims to:
|
||||
* [ ] Documentation on creating your own LLM
|
||||
* [ ] Documentation on web scraping and cleaning
|
||||
|
||||
## Documentation
|
||||
These are guides to the feature set included and the events triggered in this app.
|
||||
* [User Slash Commands](./docs/commands-guide.md)
|
||||
* [Client Events](./docs/events-guide.md)
|
||||
|
||||
## Environment Setup
|
||||
* Clone this repo using `git clone https://github.com/kevinthedang/discord-ollama.git` or just use [GitHub Desktop](https://desktop.github.com/) to clone the repo.
|
||||
* You will need a `.env` file in the root of the project directory with the bot's token. A `.env.sample` is provided for you as a reference for the required environment variables.
|
||||
@@ -41,6 +46,7 @@ The project aims to:
|
||||
* [Docker Setup for Servers and Local Machines](./docs/setup-docker.md)
|
||||
* Nvidia is recommended for now, but support for other GPUs is still under development.
|
||||
* Local use is not recommended.
|
||||
|
||||
## Resources
|
||||
* [NodeJS](https://nodejs.org/en)
|
||||
* This project runs on `lts/hydrogen`.
|
||||
|
||||
@@ -7,7 +7,7 @@ services:
|
||||
build: ./ # find docker file in designated path
|
||||
container_name: discord
|
||||
restart: always # rebuild container always
|
||||
image: kevinthedang/discord-ollama:0.7.0
|
||||
image: kevinthedang/discord-ollama:0.7.1
|
||||
environment:
|
||||
CLIENT_TOKEN: ${CLIENT_TOKEN}
|
||||
OLLAMA_IP: ${OLLAMA_IP}
|
||||
|
||||
105
docs/commands-guide.md
Normal file
105
docs/commands-guide.md
Normal file
@@ -0,0 +1,105 @@
|
||||
## Commands Guide
|
||||
This is a guide to all of the slash commands for the app.
|
||||
|
||||
* Action Commands are commands that do not affect a user's `preference file`.
|
||||
* Guild Commands can also be considered action commands.
|
||||
|
||||
> [!NOTE]
|
||||
> Administrator commands are only usable by actual administrators on the Discord server.
|
||||
|
||||
### Guild Commands (Administrator)
|
||||
1. Disable (or Toggle Chat)
|
||||
This command will `enable` or `disable` whether or not the app will respond to users.
|
||||
|
||||
```
|
||||
/toggle-chat enabled true
|
||||
```
|
||||
|
||||
2. Shutoff
|
||||
This command will shutoff the app so no users can converse with it.
|
||||
The app must be manually restarted upon being shutoff.
|
||||
|
||||
Below shuts off the app by putting `true` in the `are-you-sure` field.
|
||||
|
||||
```
|
||||
/shutoff are-you-sure true
|
||||
```
|
||||
|
||||
### Action Commands
|
||||
1. Clear Channel (Message) History
|
||||
This command will clear the history of the current channel for the user that calls it.
|
||||
Running the command in any channel will clear the message history.
|
||||
|
||||
```
|
||||
/clear-user-channel-history
|
||||
```
|
||||
|
||||
2. Pull Model
|
||||
This command will pull a model that exists on the [Ollama Model Library](https://ollama.com/library). If it does not exist there, the command will fail with an error.
|
||||
|
||||
Below tries to pull the `codellama` model.
|
||||
|
||||
```
|
||||
/pull-model model-to-pull codellama
|
||||
```
|
||||
|
||||
3. Thread Create
|
||||
This command creates a public thread to talk with the app instead of using a `GuildText` channel.
|
||||
|
||||
```
|
||||
/thread
|
||||
```
|
||||
|
||||
4. (Private) Thread Create
|
||||
This command creates a private thread to talk with the app privately.
|
||||
Invite others to the channel and they will be able to talk to the app as well.
|
||||
|
||||
```
|
||||
/private-thread
|
||||
```
|
||||
|
||||
### User Preference Commands
|
||||
1. Capacity
|
||||
This command changes how much context it will keep in conversations with the app.
|
||||
This applies to all existing chats when interacting with the app.
|
||||
|
||||
Below sets the message history capacity to at most 5 messages at once.
|
||||
|
||||
```
|
||||
/modify-capacity context-capacity 5
|
||||
```
|
||||
|
||||
2. Message Stream
|
||||
This command will toggle whether or not the app will "stream" a response.
|
||||
(think of how ChatGPT and other interfaces do this)
|
||||
|
||||
Below sets the `stream` to true to make the app respond in increments.
|
||||
|
||||
```
|
||||
/message-stream stream true
|
||||
```
|
||||
> [!NOTE]
|
||||
> This is a very slow process on Discord because "spamming" changes within 5 seconds is not allowed.
|
||||
|
||||
3. Message Style
|
||||
This command allows a user to select whether to embed the app's response.
|
||||
|
||||
```
|
||||
/message-style embed true
|
||||
```
|
||||
|
||||
This allows the app to respond as a user would normally respond.
|
||||
|
||||
```
|
||||
/message-style embed false
|
||||
```
|
||||
|
||||
4. Switch Model
|
||||
This command will switch the user-preferred model so long as it exists within the local Ollama service or in the [Ollama Model Library](https://ollama.com/library).
|
||||
If it cannot be found locally, it will attempt to find it in the model library.
|
||||
|
||||
Below we are trying to switch to a specific model size.
|
||||
|
||||
```
|
||||
/switch-model model-to-use llama3.2:1.3b
|
||||
```
|
||||
27
docs/events-guide.md
Normal file
27
docs/events-guide.md
Normal file
@@ -0,0 +1,27 @@
|
||||
## Events Guide
|
||||
This is a guide to all of the client events for the app.
|
||||
|
||||
> [!NOTE]
> Each of these is logged to the console for a developer to track.
|
||||
|
||||
1. ClientReady
|
||||
This event signifies that the Discord app is online.
|
||||
Here the app's activity is set and its commands are registered.
|
||||
|
||||
2. InteractionCreate
|
||||
This event signifies that a user interacted from Discord in some way.
|
||||
Here commands are selected from a knowledge bank and executed if found.
|
||||
|
||||
> [!NOTE]
> Possible interactions include commands, buttons, menus, etc.
|
||||
|
||||
3. MessageCreate
|
||||
This event signifies that a message was sent.
|
||||
Here user questions and comments for the LLM are processed.
|
||||
1. check message is from a user and mentions the app
|
||||
2. check for interaction preferences
|
||||
3. add the message to a queue
|
||||
4. check the response for success
|
||||
5. send a response back to the user.
|
||||
|
||||
4. ThreadDelete
|
||||
This event signifies that a Discord Thread was deleted.
|
||||
Here any preferences set for interaction within the thread are cleared away.
|
||||
1009
package-lock.json
generated
1009
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
12
package.json
12
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "discord-ollama",
|
||||
"version": "0.7.0",
|
||||
"version": "0.7.1",
|
||||
"description": "Ollama Integration into discord",
|
||||
"main": "build/index.js",
|
||||
"exports": "./build/index.js",
|
||||
@@ -26,17 +26,17 @@
|
||||
"author": "Kevin Dang",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"discord.js": "^14.15.3",
|
||||
"discord.js": "^14.16.3",
|
||||
"dotenv": "^16.4.5",
|
||||
"ollama": "^0.5.9"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^22.7.5",
|
||||
"@vitest/coverage-v8": "^2.1.2",
|
||||
"@types/node": "^22.9.0",
|
||||
"@vitest/coverage-v8": "^2.1.4",
|
||||
"ts-node": "^10.9.2",
|
||||
"tsx": "^4.19.1",
|
||||
"tsx": "^4.19.2",
|
||||
"typescript": "^5.6.3",
|
||||
"vitest": "^2.1.2"
|
||||
"vitest": "^2.1.4"
|
||||
},
|
||||
"type": "module",
|
||||
"engines": {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { EmbedBuilder, Message } from 'discord.js'
|
||||
import { EmbedBuilder, Message, SendableChannels } from 'discord.js'
|
||||
import { ChatResponse, Ollama } from 'ollama'
|
||||
import { ChatParams, UserMessage, streamResponse, blockResponse } from './index.js'
|
||||
import { Queue } from '../queues/queue.js'
|
||||
@@ -28,7 +28,8 @@ export async function embedMessage(
|
||||
.setColor('#00FF00')
|
||||
|
||||
// send the message
|
||||
const sentMessage = await message.channel.send({ embeds: [botMessage] })
|
||||
const channel = message.channel as SendableChannels
|
||||
const sentMessage = await channel.send({ embeds: [botMessage] })
|
||||
|
||||
// create params
|
||||
const params: ChatParams = {
|
||||
@@ -48,12 +49,12 @@ export async function embedMessage(
|
||||
// exceeds handled length
|
||||
if (result.length > 5000) {
|
||||
const errorEmbed = new EmbedBuilder()
|
||||
.setTitle(`Responding to ${message.author.tag}`)
|
||||
.setDescription(`Response length ${result.length} has exceeded Discord maximum.\n\nLong Stream messages not supported.`)
|
||||
.setColor('#00FF00')
|
||||
.setTitle(`Responding to ${message.author.tag}`)
|
||||
.setDescription(`Response length ${result.length} has exceeded Discord maximum.\n\nLong Stream messages not supported.`)
|
||||
.setColor('#00FF00')
|
||||
|
||||
// send error
|
||||
message.channel.send({ embeds: [errorEmbed] })
|
||||
channel.send({ embeds: [errorEmbed] })
|
||||
break // cancel loop and stop
|
||||
}
|
||||
|
||||
@@ -90,7 +91,7 @@ export async function embedMessage(
|
||||
.setDescription(result.slice(0, 5000) || 'No Content to Provide...')
|
||||
.setColor('#00FF00')
|
||||
|
||||
message.channel.send({ embeds: [whileEmbed] })
|
||||
channel.send({ embeds: [whileEmbed] })
|
||||
result = result.slice(5000)
|
||||
}
|
||||
|
||||
@@ -100,7 +101,7 @@ export async function embedMessage(
|
||||
.setColor('#00FF00')
|
||||
|
||||
// rest of the response
|
||||
message.channel.send({ embeds: [lastEmbed] })
|
||||
channel.send({ embeds: [lastEmbed] })
|
||||
} else {
|
||||
// only need to create 1 embed, handles 6000 characters
|
||||
const newEmbed = new EmbedBuilder()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { Message } from 'discord.js'
|
||||
import { Message, SendableChannels } from 'discord.js'
|
||||
import { ChatResponse, Ollama } from 'ollama'
|
||||
import { ChatParams, UserMessage, streamResponse, blockResponse } from './index.js'
|
||||
import { Queue } from '../queues/queue.js'
|
||||
@@ -20,8 +20,9 @@ export async function normalMessage(
|
||||
// bot's respnse
|
||||
let response: ChatResponse | AbortableAsyncIterator<ChatResponse>
|
||||
let result: string = ''
|
||||
const channel = message.channel as SendableChannels
|
||||
|
||||
await message.channel.send('Generating Response . . .').then(async sentMessage => {
|
||||
await channel.send('Generating Response . . .').then(async sentMessage => {
|
||||
try {
|
||||
const params: ChatParams = {
|
||||
model: model,
|
||||
@@ -39,7 +40,7 @@ export async function normalMessage(
|
||||
result = portion.message.content
|
||||
|
||||
// new message block, wait for it to send and assign new block to respond.
|
||||
await message.channel.send("Creating new stream block...").then(sentMessage => { messageBlock = sentMessage })
|
||||
await channel.send("Creating new stream block...").then(sentMessage => { messageBlock = sentMessage })
|
||||
} else {
|
||||
result += portion.message.content
|
||||
|
||||
@@ -61,12 +62,12 @@ export async function normalMessage(
|
||||
|
||||
// handle for rest of message that is >2000
|
||||
while (result.length > 2000) {
|
||||
message.channel.send(result.slice(0, 2000))
|
||||
channel.send(result.slice(0, 2000))
|
||||
result = result.slice(2000)
|
||||
}
|
||||
|
||||
// last part of message
|
||||
message.channel.send(result)
|
||||
channel.send(result)
|
||||
} else // edit the 'generic' response to new message since <2000
|
||||
sentMessage.edit(result)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user