Streamlined Preferences Setup and Default Model (#148)

* Update: Streamlined setup and Default Model

* Update: version increment
Kevin Dang
2024-12-11 17:53:35 -08:00
committed by GitHub
parent d570a50d46
commit 6ac45afb13
9 changed files with 97 additions and 54 deletions


@@ -1,6 +1,9 @@
 # Discord token for the bot
 CLIENT_TOKEN = BOT_TOKEN
+# Default model for new users
+MODEL = DEFAULT_MODEL
 # ip/port address of docker container, I use 172.18.0.3 for docker, 127.0.0.1 for local
 OLLAMA_IP = IP_ADDRESS
 OLLAMA_PORT = PORT
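For reference, a filled-in example of these variables (values are illustrative only; the llama3.2 fallback mirrors the default added in the keys file below):

# example values only
CLIENT_TOKEN = your-bot-token
MODEL = llama3.2
OLLAMA_IP = 127.0.0.1
OLLAMA_PORT = 11434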


@@ -7,7 +7,7 @@ services:
     build: ./ # find docker file in designated path
     container_name: discord
     restart: always # rebuild container always
-    image: kevinthedang/discord-ollama:0.7.3
+    image: kevinthedang/discord-ollama:0.7.4
     environment:
       CLIENT_TOKEN: ${CLIENT_TOKEN}
       OLLAMA_IP: ${OLLAMA_IP}

package-lock.json (generated)

@@ -1,12 +1,12 @@
 {
   "name": "discord-ollama",
-  "version": "0.7.3",
+  "version": "0.7.4",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "discord-ollama",
-      "version": "0.7.3",
+      "version": "0.7.4",
       "license": "ISC",
       "dependencies": {
         "discord.js": "^14.16.3",


@@ -1,6 +1,6 @@
 {
   "name": "discord-ollama",
-  "version": "0.7.3",
+  "version": "0.7.4",
   "description": "Ollama Integration into discord",
   "main": "build/index.js",
   "exports": "./build/index.js",


@@ -25,7 +25,7 @@ export const ollama = new Ollama({
 const messageHistory: Queue<UserMessage> = new Queue<UserMessage>
 // register all events
-registerEvents(client, Events, messageHistory, ollama)
+registerEvents(client, Events, messageHistory, ollama, Keys.defaultModel)
 // Try to log in the client
 await client.login(Keys.clientToken)


@@ -8,7 +8,7 @@ import { getChannelInfo, getServerConfig, getUserConfig, openChannelInfo, openCo
  *
  * @param message the message received from the channel
  */
-export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client }, message) => {
+export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client, defaultModel }, message) => {
     const clientId = client.user!!.id
     const cleanedMessage = clean(message.content, clientId)
     log(`Message \"${cleanedMessage}\" from ${message.author.tag} in channel/thread ${message.channelId}.`)
@@ -21,57 +21,88 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
     // default stream to false
     let shouldStream = false
+    // Params for Preferences Fetching
+    const maxRetries = 3
+    const delay = 1000 // in millisecons
     try {
         // Retrieve Server/Guild Preferences
-        await new Promise((resolve, reject) => {
-            getServerConfig(`${message.guildId}-config.json`, (config) => {
-                // check if config.json exists
-                if (config === undefined) {
-                    // Allowing chat options to be available
-                    openConfig(`${message.guildId}-config.json`, 'toggle-chat', true)
-                    reject(new Error('No Server Preferences is set up.\n\nCreating default server preferences file...\nPlease try chatting again.'))
-                    return
-                }
-                // check if chat is disabled
-                if (!config.options['toggle-chat']) {
-                    reject(new Error('Admin(s) have disabled chat features.\n\n Please contact your server\'s admin(s).'))
-                    return
-                }
-                resolve(config)
-            })
-        })
-        // Retrieve User Preferences
-        const userConfig: UserConfig = await new Promise((resolve, reject) => {
-            getUserConfig(`${message.author.username}-config.json`, (config) => {
-                if (config === undefined) {
-                    openConfig(`${message.author.username}-config.json`, 'message-style', false)
-                    reject(new Error('No User Preferences is set up.\n\nCreating preferences file with \`message-style\` set as \`false\` for regular message style.\nPlease try chatting again.'))
-                    return
-                }
-                // check if there is a set capacity in config
-                if (typeof config.options['modify-capacity'] !== 'number')
-                    log(`Capacity is undefined, using default capacity of ${msgHist.capacity}.`)
-                else if (config.options['modify-capacity'] === msgHist.capacity)
-                    log(`Capacity matches config as ${msgHist.capacity}, no changes made.`)
-                else {
-                    log(`New Capacity found. Setting Context Capacity to ${config.options['modify-capacity']}.`)
-                    msgHist.capacity = config.options['modify-capacity']
-                }
-                // set stream state
-                shouldStream = config.options['message-stream'] as boolean || false
-                if (typeof config.options['switch-model'] !== 'string')
-                    reject(new Error(`No Model was set. Please set a model by running \`/switch-model <model of choice>\`.\n\nIf you do not have any models. Run \`/pull-model <model name>\`.`))
-                resolve(config)
-            })
-        })
+        let attempt = 0
+        while (attempt < maxRetries) {
+            try {
+                await new Promise((resolve, reject) => {
+                    getServerConfig(`${message.guildId}-config.json`, (config) => {
+                        // check if config.json exists
+                        if (config === undefined) {
+                            // Allowing chat options to be available
+                            openConfig(`${message.guildId}-config.json`, 'toggle-chat', true)
+                            reject(new Error('Failed to locate or create Server Preferences\n\nPlease try chatting again...'))
+                        }
+                        // check if chat is disabled
+                        else if (!config.options['toggle-chat'])
+                            reject(new Error('Admin(s) have disabled chat features.\n\n Please contact your server\'s admin(s).'))
+                        else
+                            resolve(config)
+                    })
+                })
+                break // successful
+            } catch (error) {
+                ++attempt
+                if (attempt < maxRetries) {
+                    log(`Attempt ${attempt} failed for Server Preferences. Retrying in ${delay}ms...`)
+                    await new Promise(ret => setTimeout(ret, delay))
+                } else
+                    throw new Error(`Could not retrieve Server Preferences, please try chatting again...`)
+            }
+        }
+        // Reset attempts for User preferences
+        attempt = 0
+        let userConfig: UserConfig | undefined
+        while (attempt < maxRetries) {
+            try {
+                // Retrieve User Preferences
+                userConfig = await new Promise((resolve, reject) => {
+                    getUserConfig(`${message.author.username}-config.json`, (config) => {
+                        if (config === undefined) {
+                            openConfig(`${message.author.username}-config.json`, 'message-style', false)
+                            openConfig(`${message.author.username}-config.json`, 'switch-model', defaultModel)
+                            reject(new Error('No User Preferences is set up.\n\nCreating preferences file with \`message-style\` set as \`false\` for regular message style.\nPlease try chatting again.'))
+                            return
+                        }
+                        // check if there is a set capacity in config
+                        else if (typeof config.options['modify-capacity'] !== 'number')
+                            log(`Capacity is undefined, using default capacity of ${msgHist.capacity}.`)
+                        else if (config.options['modify-capacity'] === msgHist.capacity)
+                            log(`Capacity matches config as ${msgHist.capacity}, no changes made.`)
+                        else {
+                            log(`New Capacity found. Setting Context Capacity to ${config.options['modify-capacity']}.`)
+                            msgHist.capacity = config.options['modify-capacity']
+                        }
+                        // set stream state
+                        shouldStream = config.options['message-stream'] as boolean || false
+                        if (typeof config.options['switch-model'] !== 'string')
+                            reject(new Error(`No Model was set. Please set a model by running \`/switch-model <model of choice>\`.\n\nIf you do not have any models. Run \`/pull-model <model name>\`.`))
+                        resolve(config)
+                    })
+                })
+                break // successful
+            } catch (error) {
+                ++attempt
+                if (attempt < maxRetries) {
+                    log(`Attempt ${attempt} failed for User Preferences. Retrying in ${delay}ms...`)
+                    await new Promise(ret => setTimeout(ret, delay))
+                } else
+                    throw new Error(`Could not retrieve User Preferences, please try chatting again...`)
+            }
+        }
     // need new check for "open/active" threads/channels here!
     let chatMessages: UserMessage[] = await new Promise((resolve) => {
@@ -106,6 +137,9 @@ export default event(Events.MessageCreate, async ({ log, msgHist, ollama, client
     // response string for ollama to put its response
     let response: string
+    if (!userConfig)
+        throw new Error(`Failed to initialize User Preference for **${message.author.username}**.\n\nIt's likely you do not have a model set. Please use the \`switch-model\` command to do that.`)
     // get message attachment if exists
     const messageAttachment: string[] = await getAttachmentData(message.attachments.first())
     const model: string = userConfig.options['switch-model']
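As a reading aid only, the two retry loops above follow the same shape; a generic TypeScript sketch of that pattern (the helper name and signature are hypothetical and not part of this commit):

// sketch: retry an async lookup a fixed number of times, pausing between attempts
async function fetchWithRetries<T>(label: string, attemptFn: () => Promise<T>, maxRetries = 3, delayMs = 1000): Promise<T> {
    for (let attempt = 1; attempt <= maxRetries; attempt++) {
        try {
            return await attemptFn() // resolves on the first successful attempt
        } catch (error) {
            if (attempt < maxRetries)
                await new Promise(resolve => setTimeout(resolve, delayMs)) // wait before retrying
        }
    }
    throw new Error(`Could not retrieve ${label}, please try chatting again...`)
}

Usage would look like fetchWithRetries('User Preferences', () => ...), but the commit keeps the two loops inline.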


@@ -4,6 +4,7 @@ export const Keys = {
     clientToken: getEnvVar('CLIENT_TOKEN'),
     ipAddress: getEnvVar('OLLAMA_IP', '127.0.0.1'), // default ollama ip if none
     portAddress: getEnvVar('OLLAMA_PORT', '11434'), // default ollama port if none
+    defaultModel: getEnvVar('MODEL', 'llama3.2')
 } as const // readonly keys
 export default Keys
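The getEnvVar helper itself is not shown in this diff; a minimal sketch of what a function with that call shape could look like (assumed implementation, the real one may differ):

// sketch: read an environment variable, fall back to a default, or fail loudly if neither exists
export function getEnvVar(name: string, fallback?: string): string {
    const value = process.env[name] ?? fallback
    if (value === undefined)
        throw new Error(`Missing environment variable: ${name}`)
    return value
}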


@@ -36,7 +36,8 @@ export interface EventProps {
     client: Client
     log: LogMethod
     msgHist: Queue<UserMessage>
-    ollama: Ollama
+    ollama: Ollama,
+    defaultModel: String
 }
 export type EventCallback<T extends EventKeys> = (
     props: EventProps,
@@ -64,7 +65,8 @@ export function registerEvents(
     client: Client,
     events: Event[],
     msgHist: Queue<UserMessage>,
-    ollama: Ollama
+    ollama: Ollama,
+    defaultModel: String
 ): void {
     for (const { key, callback } of events) {
         client.on(key, (...args) => {
@@ -73,7 +75,7 @@ export function registerEvents(
             // Handle Errors, call callback, log errors as needed
             try {
-                callback({ client, log, msgHist, ollama }, ...args)
+                callback({ client, log, msgHist, ollama, defaultModel }, ...args)
             } catch (error) {
                 log('[Uncaught Error]', error)
             }


@@ -73,7 +73,10 @@ export async function normalMessage(
             }
         } catch(error: any) {
             console.log(`[Util: messageNormal] Error creating message: ${error.message}`)
-            sentMessage.edit(`**Response generation failed.**\n\nReason: ${error.message}`)
+            if (error.message.includes('try pulling it first'))
+                sentMessage.edit(`**Response generation failed.**\n\nReason: You do not have the ${model} downloaded. Ask an admin to pull it using the \`pull-model\` command.`)
+            else
+                sentMessage.edit(`**Response generation failed.**\n\nReason: ${error.message}`)
         }
     })
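For context, the 'try pulling it first' substring matched above appears to come from the error Ollama raises when a chat request names a model that has not been downloaded yet. A rough sketch of the admin-side pull with the ollama JS client (host value and function name are illustrative, not taken from this commit):

import { Ollama } from 'ollama'

// sketch: download a model by name so later chat requests can find it locally
const ollama = new Ollama({ host: 'http://127.0.0.1:11434' })

export async function pullModel(name: string): Promise<void> {
    await ollama.pull({ model: name, stream: false }) // resolves once the download finishes
}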