Commit: 9247463480 "hardcoded and mentions"
Author: Kevin Dang
Date: 2024-01-29 12:50:59 -08:00
* added options to queries

* removed hard coded vals, added message options

* updated importing

* added check for message mentions

* fix missing botID

* updated token to uid

* added contributor

---------

Co-authored-by: JT2M0L3Y <jtsmoley@icloud.com>
8 changed files with 166 additions and 75 deletions

README.md

@@ -32,5 +32,6 @@ Ollama is an AI model management tool that allows users to install and use custo
 ## Acknowledgement
 * [Kevin Dang](https://github.com/kevinthedang)
+* [Jonathan Smoley](https://github.com/JT2M0L3Y)

 [discord-ollama](https://github.com/kevinthedang/discord-ollama) © 2023 by [Kevin Dang](https://github.com/kevinthedang) is licensed under [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/?ref=chooser-v1)

src/client.ts

@@ -5,6 +5,7 @@ import Events from "./events/index.js";
 // Import keys/tokens
 import Keys from "./keys.js";

+// initialize the client with the following permissions when logging in
 const client = new Client({
     intents: [
         GatewayIntentBits.Guilds,
@@ -16,12 +17,19 @@ const client = new Client({
 const messageHistory = [
     {
-        role: 'assistant',
-        content: 'My name is Ollama GU.'
+        role: 'system',
+        content: 'Your name is Ollama GU'
     }
 ]

-registerEvents(client, Events, messageHistory)
+/**
+ * register events for bot to listen to in discord
+ * @param messageHistory message history for the llm
+ * @param Events events to register
+ * @param client the bot reference
+ * @param Keys tokens from .env files
+ */
+registerEvents(client, Events, messageHistory, Keys)

 // Try to log in the client
 client.login(Keys.clientToken)
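
For context (not part of the diff): the seeded 'system' entry is the only message in the history at startup, and the messageCreate handler below appends one 'user' and one 'assistant' entry per exchange. A hypothetical history after a single round trip, with illustrative contents:

const messageHistory = [
    { role: 'system', content: 'Your name is Ollama GU' },    // seeded above at startup
    { role: 'user', content: 'what model are you running?' }, // pushed in messageCreate
    { role: 'assistant', content: 'I am running llama2.' }    // pushed after a successful query
]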

src/events/messageCreate.ts

@@ -1,90 +1,39 @@
-import { event, Events } from '../utils/index.js'
-import { EmbedBuilder } from 'discord.js'
-import ollama from 'ollama'
-import Axios from 'axios'
+import { embedMessage, event, Events } from '../utils/index.js'

 /**
  * Max Message length for free users is 2000 characters (bot or not).
  * @param message the message received from the channel
  */
-export default event(Events.MessageCreate, async ({ log, msgHist }, message) => {
+export default event(Events.MessageCreate, async ({ log, msgHist, tokens }, message) => {
     log(`Message created \"${message.content}\" from ${message.author.tag}.`)

-    // Hard-coded channel to test output there only, in our case "ollama-endpoint"
-    if (message.channelId != '1188262786497785896') return
+    if (message.channelId != tokens.channel) return

     // Do not respond if bot talks in the chat
     if (message.author.tag === message.client.user.tag) return

+    // Only respond if message mentions the bot
+    if (!message.mentions.has(tokens.botUid)) return

     // push user response
     msgHist.push({
         role: 'user',
         content: message.content
     })

-    const botMessage = new EmbedBuilder()
-        .setTitle(`Response to ${message.author.tag}`)
-        .setDescription('Generating Response . . .')
-        .setColor('#00FF00')
+    // Try to query and send embed
+    const response = await embedMessage(message, tokens, msgHist)

-    const sentMessage = await message.channel.send({ embeds: [botMessage] })
+    // Try to query and send message
+    // log(normalMessage(message, tokens, msgHist))

-    const request = async () => {
-        try {
-            // change this when using an actual hosted server or use ollama.js
-            const response = await ollama.chat({
-                model: 'llama2',
-                messages: msgHist,
-                stream: false
-            })
-
-            // If something bad happened, remove user query and stop
-            if (response == undefined) { msgHist.pop(); return }
-
-            const embed = new EmbedBuilder()
-                .setTitle(`Response to ${message.author.tag}`)
-                .setDescription(response.message.content)
-                .setColor('#00FF00')
-
-            sentMessage.edit({ embeds: [embed] })
-
-            // push bot response
-            msgHist.push({
-                role: 'assistant',
-                content: response.message.content
-            })
-        } catch (error) {
-            message.edit(error as string)
-            log(error)
-        }
-    }
-
-    // Attempt to call ollama's endpoint
-    request()
-
-    // Reply with something to prompt that ollama is working, version without embed
-    message.reply("Generating Response . . .").then(sentMessage => {
-        // Request made to API
-        const request = async () => {
-            try {
-                // change this when using an actual hosted server or use ollama.js
-                const response = await Axios.post('http://127.0.0.1:11434/api/chat', {
-                    model: 'llama2',
-                    messages: msgHist,
-                    stream: false
-                })
-
-                sentMessage.edit(response.data.message.content)
-
-                // push bot response
-                // msgHist.push({
-                //     role: 'assistant',
-                //     content: response.data.message.content
-                // })
-            } catch (error) {
-                message.edit(error as string)
-                log(error)
-            }
-        }
-
-        // Attempt to call ollama's endpoint
-        request()
-    })
+    // successful query, save it as history
+    msgHist.push({
+        role: 'assistant',
+        content: response.message.content
+    })
 })
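
The new mention gate relies on discord.js resolving a raw user id: MessageMentions#has accepts a UserResolvable, so passing the BOT_UID string works. An equivalent, more explicit form (illustrative, not from the commit):

// Only respond when the bot's user id appears in the parsed user mentions
const mentionsBot = message.mentions.users.has(tokens.botUid)
if (!mentionsBot) return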

src/keys.ts

@@ -1,7 +1,10 @@
 import { getEnvVar } from "./utils/env.js"

 export const Keys = {
-    clientToken: getEnvVar('CLIENT_TOKEN')
+    clientToken: getEnvVar('CLIENT_TOKEN'),
+    channel: getEnvVar('CHANNEL_ID'),
+    model: getEnvVar('MODEL'),
+    botUid: getEnvVar('BOT_UID')
 } as const // readonly keys

 export default Keys
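
These four lookups imply a .env file along the following lines; the variable names come from the diff, the values are placeholders:

CLIENT_TOKEN=your-discord-bot-token
CHANNEL_ID=123456789012345678
MODEL=llama2
BOT_UID=123456789012345678

env.ts is not part of this commit, but a minimal getEnvVar consistent with this usage would read process.env and fail fast on a missing name. A sketch only:

// sketch; the real src/utils/env.ts is not shown in this commit
export function getEnvVar(name: string): string {
    const value = process.env[name]
    if (value === undefined) throw new Error(`missing environment variable: ${name}`)
    return value
}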

src/utils/events.ts

@@ -11,6 +11,11 @@ export interface EventProps {
     client: Client
     log: LogMethod
     msgHist: { role: string, content: string }[]
+    tokens: {
+        channel: string,
+        model: string,
+        botUid: string
+    }
 }

 export type EventCallback<T extends EventKeys> = (
     props: EventProps,
@@ -27,7 +32,16 @@ export function event<T extends EventKeys>(key: T, callback: EventCallback<T>):
     return { key, callback }
 }

-export function registerEvents(client: Client, events: Event[], msgHist: { role: string, content: string }[]): void {
+export function registerEvents(
+    client: Client,
+    events: Event[],
+    msgHist: { role: string, content: string }[],
+    tokens: {
+        channel: string,
+        model: string,
+        botUid: string
+    }
+): void {
     for (const { key, callback } of events) {
         client.on(key, (...args) => {
             // Create a new log method for this event
@@ -35,7 +49,7 @@ export function registerEvents(client: Client, events: Event[], msgHist: { role:
             // Handle Errors, call callback, log errors as needed
             try {
-                callback({ client, log, msgHist }, ...args)
+                callback({ client, log, msgHist, tokens }, ...args)
             } catch (error) {
                 log('[Uncaught Error]', error)
             }
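
This file references a few types the hunks do not show (EventKeys, LogMethod, Event). Their rough shape, inferred from usage here and in messageCreate.ts; a sketch, not the file's actual contents:

import { ClientEvents } from 'discord.js'

type EventKeys = keyof ClientEvents           // e.g. 'messageCreate'
type LogMethod = (...args: unknown[]) => void
interface Event<T extends EventKeys = EventKeys> {
    key: T
    callback: EventCallback<T>
}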

src/utils/index.ts

@@ -1,3 +1,5 @@
 // Centralized import index
-export * from './env.js';
-export * from './events.js';
+export * from './env.js'
+export * from './events.js'
+export * from './messageEmbed.js'
+export * from './messageNormal.js'

src/utils/messageEmbed.ts (new file)

@@ -0,0 +1,66 @@
import { EmbedBuilder, Message } from "discord.js";
import ollama, { ChatResponse } from "ollama";

/**
 * Method to send replies as an embed on discord
 * @param message message sent by the user
 * @param tokens tokens to run query
 * @param msgHist message history between user and model
 */
export async function embedMessage(
    message: Message,
    tokens: {
        channel: string,
        model: string
    },
    msgHist: {
        role: string,
        content: string
    }[]
) {
    // bot response
    let response: ChatResponse

    // initial message to client
    const botMessage = new EmbedBuilder()
        .setTitle(`Responding to ${message.author.tag}`)
        .setDescription('Generating Response . . .')
        .setColor('#00FF00')

    // send the message
    const sentMessage = await message.channel.send({ embeds: [botMessage] })

    try {
        // Attempt to query model for message
        response = await ollama.chat({
            model: tokens.model,
            messages: msgHist,
            options: {
                num_thread: 8, // remove if optimization needed further
                mirostat: 1,
                mirostat_tau: 2.0,
                top_k: 70
            },
            stream: false
        })

        const newEmbed = new EmbedBuilder()
            .setTitle(`Responding to ${message.author.tag}`)
            .setDescription(response.message.content)
            .setColor('#00FF00')

        // edit the message
        sentMessage.edit({ embeds: [newEmbed] })
    } catch (error: any) {
        const errorEmbed = new EmbedBuilder()
            .setTitle(`Responding to ${message.author.tag}`)
            .setDescription(error.error)
            .setColor('#00FF00')

        // send back error
        sentMessage.edit({ embeds: [errorEmbed] })
    }

    // Hope there is a response! undefined otherwise
    return response!!
}
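
The options block is what the commit message calls "added options to queries". These are standard Ollama sampling parameters; annotated here for reference, with descriptions from Ollama's model-parameter documentation:

const queryOptions = {
    num_thread: 8,     // CPU threads used for generation
    mirostat: 1,       // enable Mirostat adaptive sampling
    mirostat_tau: 2.0, // target entropy; lower values yield more focused output
    top_k: 70          // sample only from the 70 most likely tokens
}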

src/utils/messageNormal.ts (new file)

@@ -0,0 +1,48 @@
import { Message } from "discord.js";
import ollama, { ChatResponse } from "ollama";

/**
 * Method to send replies as normal text on discord like any other user
 * @param message message sent by the user
 * @param tokens tokens to run query
 * @param msgHist message history between user and model
 */
export function normalMessage(
    message: Message,
    tokens: {
        channel: string,
        model: string
    },
    msgHist: {
        role: string,
        content: string
    }[]
) {
    // bot's response
    let response: ChatResponse

    message.reply('Generating Response . . .').then(async sentMessage => {
        try {
            // Attempt to query model for message
            response = await ollama.chat({
                model: tokens.model,
                messages: msgHist,
                options: {
                    num_thread: 8, // remove if optimization needed further
                    mirostat: 1,
                    mirostat_tau: 2.0,
                    top_k: 70
                },
                stream: false
            })

            // edit the 'generic' response to new message
            sentMessage.edit(response.message.content)
        } catch (error: any) {
            sentMessage.edit(error.error)
        }
    })

    // Hope there is a response, force client to believe
    // NOTE: the .then() above has not resolved yet, so response is still
    // undefined here; the non-null assertion only silences the compiler
    return response!!
}
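
As the closing comment notes, normalMessage returns before its reply promise settles, so callers receive undefined despite the non-null assertion; presumably this is why messageCreate.ts keeps its normalMessage call commented out. A hypothetical awaitable variant, not part of this commit, that avoids the problem:

// sketch of an awaitable plain-text reply; name and structure are illustrative
export async function normalMessageAwaited(
    message: Message,
    tokens: { channel: string, model: string },
    msgHist: { role: string, content: string }[]
): Promise<ChatResponse> {
    // await the placeholder reply, the model call, and the edit in sequence
    const sentMessage = await message.reply('Generating Response . . .')
    const response = await ollama.chat({
        model: tokens.model,
        messages: msgHist,
        stream: false
    })
    await sentMessage.edit(response.message.content)
    return response
}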