Mirror of https://github.com/kevinthedang/discord-ollama.git (synced 2025-12-12 11:56:06 -05:00)

Compare commits: cc7a3661b7 ... v0.8.5 (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e07e8fbf89 | |
| | 6d0a537540 | |
| | 0ddd59aea1 | |
| | a5faca87aa | |
| | 4c96b3863a | |
.github/workflows/build.yml (vendored): 2 lines changed

```diff
@@ -33,6 +33,7 @@ jobs:
           echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
           echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
           echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
           echo MODEL = ${{ secrets.MODEL }} >> .env
           echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
+          echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env

@@ -61,6 +62,7 @@ jobs:
           echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
           echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
           echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
           echo MODEL = ${{ secrets.MODEL }} >> .env
           echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
+          echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env
```
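Reviewer note: the workflows only write these keys into a generated .env; the bot reads them at runtime. Below is a minimal, hypothetical sketch of consuming the new REDIS_PORT value with dotenv and node-redis (both are dependencies in package.json further down). The variable names mirror the echo lines above, but the loading code is an assumption, not the repository's actual configuration module.

```ts
// Hypothetical sketch only: read the keys the workflow writes into .env.
import 'dotenv/config'
import { createClient } from 'redis'

const redisIp = process.env.REDIS_IP ?? '127.0.0.1'   // existing key
const redisPort = process.env.REDIS_PORT ?? '6379'    // key added in this change

// node-redis v4 accepts a redis:// URL built from host and port
const redis = createClient({ url: `redis://${redisIp}:${redisPort}` })
```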
.github/workflows/coverage.yml (vendored): 3 lines changed

```diff
@@ -30,12 +30,13 @@ jobs:
           echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
           echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
           echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
           echo MODEL = ${{ secrets.MODEL }} >> .env
           echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
+          echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env

       - name: Collect Code Coverage
         run: |
-          LINE_PCT=$(npm run test:coverage | tail -2 | head -1 | awk '{print $3}')
+          LINE_PCT=$(npm run coverage | tail -2 | head -1 | awk '{print $3}')
           echo "COVERAGE=$LINE_PCT" >> $GITHUB_ENV

       - name: Upload Code Coverage
```
.github/workflows/deploy.yml (vendored): 1 line changed

```diff
@@ -21,6 +21,7 @@ jobs:
           echo CLIENT_TOKEN = ${{ secrets.CLIENT }} >> .env
           echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
           echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
           echo MODEL = ${{ secrets.MODEL }} >> .env
           echo DISCORD_IP = ${{ secrets.DISCORD_IP }} >> .env
           echo SUBNET_ADDRESS = ${{ secrets.SUBNET_ADDRESS }} >> .env
+          echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
```
.github/workflows/test.yml (vendored): 3 lines changed

```diff
@@ -41,9 +41,10 @@ jobs:
           echo CLIENT_TOKEN = ${{ secrets.BOT_TOKEN }} >> .env
           echo OLLAMA_IP = ${{ secrets.OLLAMA_IP }} >> .env
           echo OLLAMA_PORT = ${{ secrets.OLLAMA_PORT }} >> .env
           echo MODEL = ${{ secrets.MODEL }} >> .env
           echo REDIS_IP = ${{ secrets.REDIS_IP }} >> .env
+          echo REDIS_PORT = ${{ secrets.REDIS_PORT }} >> .env

       - name: Test Application
         run: |
-          npm run test:run
+          npm run tests
```
docker-compose.yml

```diff
@@ -7,11 +7,12 @@ services:
     build: ./ # find docker file in designated path
     container_name: discord
     restart: always # rebuild container always
-    image: kevinthedang/discord-ollama:0.8.3
+    image: kevinthedang/discord-ollama:0.8.5
    environment:
      CLIENT_TOKEN: ${CLIENT_TOKEN}
      OLLAMA_IP: ${OLLAMA_IP}
      OLLAMA_PORT: ${OLLAMA_PORT}
      MODEL: ${MODEL}
      REDIS_IP: ${REDIS_IP}
+     REDIS_PORT: ${REDIS_PORT}
    networks:
```
package-lock.json (generated): 1054 lines changed
File diff suppressed because it is too large.
package.json: 18 lines changed

```diff
@@ -1,12 +1,12 @@
 {
   "name": "discord-ollama",
-  "version": "0.8.3",
+  "version": "0.8.5",
   "description": "Ollama Integration into discord",
   "main": "build/index.js",
   "exports": "./build/index.js",
   "scripts": {
-    "test:run": "vitest run",
-    "test:coverage": "vitest run --coverage",
+    "tests": "vitest run",
+    "coverage": "vitest run --coverage",
     "watch": "tsx watch src",
     "build": "tsc",
     "prod": "node .",
@@ -28,16 +28,16 @@
   "license": "ISC",
   "dependencies": {
     "discord.js": "^14.18.0",
-    "dotenv": "^16.4.7",
-    "ollama": "^0.5.13",
+    "dotenv": "^16.5.0",
+    "ollama": "^0.5.15",
     "redis": "^4.7.0"
   },
   "devDependencies": {
-    "@types/node": "^22.13.5",
-    "@vitest/coverage-v8": "^3.0.6",
+    "@types/node": "^22.13.14",
+    "@vitest/coverage-v8": "^3.0.9",
     "ts-node": "^10.9.2",
     "tsx": "^4.19.3",
-    "typescript": "^5.7.3",
+    "typescript": "^5.8.2",
     "vitest": "^3.0.4"
   },
   "type": "module",
@@ -45,4 +45,4 @@
     "npm": ">=10.9.0",
     "node": ">=22.12.0"
   }
 }
```
src/index.ts

```diff
@@ -34,10 +34,14 @@ registerEvents(client, Events, messageHistory, ollama, Keys.defaultModel)

 // Try to connect to redis
 await redis.connect()
-    .then(() => console.log('[Redis] Connected'))
-    .catch((error) => {
-        console.error('[Redis] Connection Error', error)
-        process.exit(1)
+    .then(response => {
+        console.log('[Redis] Successfully Connected')
+    })
+    .catch(error => {
+        console.error('[Redis] Connection Error. See error below:\n', error)
+        console.warn('[Redis] Failed to connect to Redis Database, using local system')
+        // TODO: create boolean flag that will probably be used in messageCreate.ts if redis database is down
+        // When implementing this boolean flag, move connection to database BEFORE the registerEvents method
     })

 // Try to log in the client
```
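Reviewer note: the new catch block leaves a TODO about a boolean flag for falling back to local storage when Redis is unreachable. One possible shape for that flag, sketched purely as an assumption (the name redisAvailable and the fallback behavior are illustrative, not from the repository), connecting before registerEvents as the TODO suggests:

```ts
// Illustrative sketch of the TODO: a flag that event handlers (e.g. messageCreate.ts)
// could check before touching Redis. Names here are hypothetical.
import { createClient } from 'redis'

export let redisAvailable = false

const redis = createClient({
    url: `redis://${process.env.REDIS_IP ?? '127.0.0.1'}:${process.env.REDIS_PORT ?? '6379'}`
})

// Connect BEFORE registerEvents so handlers see the final value of the flag.
await redis.connect()
    .then(() => {
        redisAvailable = true
        console.log('[Redis] Successfully Connected')
    })
    .catch(error => {
        console.warn('[Redis] Failed to connect to Redis Database, using local system', error)
    })

// e.g. in a message handler: if (redisAvailable) { /* use Redis */ } else { /* use local files */ }
```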
DeleteModel slash command

```diff
@@ -37,9 +37,21 @@ export const DeleteModel: SlashCommand = {
     }

     // check if model exists
-    const modelExists: boolean = await ollama.list()
+    const modelExists = await ollama.list()
         .then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput)))
+        .catch(error => {
+            console.error(`[Command: delete-model] Failed to connect with Ollama service. Error: ${error.message}`)
+        })
+
+    // Validate for any issue or if service is running
+    if (!modelExists) {
+        interaction.editReply({
+            content: `The Ollama service is not running. Please turn on/download the [service](https://ollama.com/).`
+        })
+        return
+    }

     try {
         // call ollama to delete model
         if (modelExists) {
```
PullModel slash command

```diff
@@ -36,9 +36,21 @@ export const PullModel: SlashCommand = {
         return
     }

-    // check if model was already pulled
-    const modelExists: boolean = await ollama.list()
+    // check if model was already pulled, if the ollama service isn't running throw error
+    const modelExists = await ollama.list()
         .then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput)))
+        .catch(error => {
+            console.error(`[Command: pull-model] Failed to connect with Ollama service. Error: ${error.message}`)
+        })
+
+    // Validate for any issue or if service is running
+    if (!modelExists) {
+        interaction.editReply({
+            content: `The Ollama service is not running. Please turn on/download the [service](https://ollama.com/).`
+        })
+        return
+    }

     try {
         // call ollama to pull desired model
```
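Reviewer note: delete-model and pull-model now repeat the same list-and-check pattern with an identical catch. If one wanted to factor it out, a small helper could look like the sketch below. modelAvailable is a hypothetical name, not a function in the repository; Ollama, ModelResponse, and ollama.list() come from the ollama package already used in these commands.

```ts
// Hypothetical helper consolidating the pattern shown in both commands above.
import { Ollama, ModelResponse } from 'ollama'

async function modelAvailable(ollama: Ollama, modelInput: string, command: string): Promise<boolean> {
    return ollama.list()
        .then(response => response.models.some((model: ModelResponse) => model.name.startsWith(modelInput)))
        .catch(error => {
            console.error(`[Command: ${command}] Failed to connect with Ollama service. Error: ${error.message}`)
            return false // treat an unreachable service the same as a missing model
        })
}
```

A caller would then run `if (!(await modelAvailable(ollama, modelInput, 'pull-model'))) { ... return }` before its try block, exactly as the inline version does.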
SwitchModel slash command

```diff
@@ -45,6 +45,9 @@ export const SwitchModel: SlashCommand = {
             }
         }
     })
+    .catch(error => {
+        console.error(`[Command: switch-model] Failed to connect with Ollama service. Error: ${error.message}`)
+    })
     // todo: problem can be here if async messes up
     if (switchSuccess) {
         // set model now that it exists
@@ -56,10 +59,13 @@ export const SwitchModel: SlashCommand = {
         interaction.editReply({
             content: `Could not find **${modelInput}** in local model library.\n\nPlease contact an server admin for access to this model.`
         })
-    } catch (error) {
+    } catch (error: any) {
         // could not resolve user model switch
+        if (error.message.includes("fetch failed") as string)
+            error.message = "The Ollama service is not running. Please turn on/download the [service](https://ollama.com/)."
+
         interaction.editReply({
-            content: `Unable to switch user preferred model to **${modelInput}**.\n\n${error}\n\nPossible solution is to request an server admin run \`/pull-model ${modelInput}\` and try again.`
+            content: `Unable to switch user preferred model to **${modelInput}**.\n\n${error.message}`
         })
         return
     }
```
messageCreate.ts event handler

```diff
@@ -71,9 +71,8 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
         userConfig = await new Promise((resolve, reject) => {
             getUserConfig(`${message.author.username}-config.json`, (config) => {
                 if (config === undefined) {
-                    openConfig(`${message.author.username}-config.json`, 'message-style', false)
                     openConfig(`${message.author.username}-config.json`, 'switch-model', defaultModel)
-                    reject(new Error('No User Preferences is set up.\n\nCreating preferences file with \`message-style\` set as \`false\` for regular message style.\nPlease try chatting again.'))
+                    reject(new Error(`No User Preferences is set up.\n\nCreating new preferences file for ${message.author.username}\nPlease try chatting again.`))
                     return
                 }
```
normalMessage utility

```diff
@@ -74,10 +74,11 @@ export async function normalMessage(
         }
     } catch (error: any) {
         console.log(`[Util: messageNormal] Error creating message: ${error.message}`)
-        if (error.message.includes('try pulling it first'))
-            sentMessage.edit(`**Response generation failed.**\n\nReason: You do not have the ${model} downloaded. Ask an admin to pull it using the \`pull-model\` command.`)
-        else
-            sentMessage.edit(`**Response generation failed.**\n\nReason: ${error.message}`)
+        if (error.message.includes('fetch failed'))
+            error.message = 'Missing ollama service on machine'
+        else if (error.message.includes('try pulling it first'))
+            error.message = `You do not have the ${model} downloaded. Ask an admin to pull it using the \`pull-model\` command.`
+        sentMessage.edit(`**Response generation failed.**\n\nReason: ${error.message}`)
     }
 })
```
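Reviewer note: this hunk and the switch-model change above both rewrite error.message in place to turn raw Ollama failures ('fetch failed', 'try pulling it first') into user-facing text. As an illustration only (friendlyOllamaError is not a function in the repository), the shared mapping could be centralized like this:

```ts
// Hypothetical consolidation of the inline checks used in the switch-model and normalMessage hunks.
function friendlyOllamaError(error: Error, model: string): string {
    if (error.message.includes('fetch failed'))
        return 'The Ollama service is not running. Please turn on/download the service at https://ollama.com/.'
    if (error.message.includes('try pulling it first'))
        return `You do not have the ${model} downloaded. Ask an admin to pull it using the \`pull-model\` command.`
    return error.message
}

// usage: sentMessage.edit(`**Response generation failed.**\n\nReason: ${friendlyOllamaError(error, model)}`)
```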