diff --git a/package.json b/package.json index f67a09b..cfae00a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "discord-ollama", - "version": "0.1.1", + "version": "0.1.2", "description": "Ollama Integration into discord", "main": "dist/index.js", "exports": "./dist/index.js", diff --git a/src/events/messageCreate.ts b/src/events/messageCreate.ts index 9cc54bf..00df794 100644 --- a/src/events/messageCreate.ts +++ b/src/events/messageCreate.ts @@ -1,10 +1,13 @@ import { event, Events } from '../utils/index.js' +import { EmbedBuilder } from 'discord.js' +import ollama from 'ollama' import Axios from 'axios' -/* +/** * Max Message length for free users is 2000 characters (bot or not). + * @param message the message received from the channel */ -export default event(Events.MessageCreate, ({ log, msgHist }, message) => { +export default event(Events.MessageCreate, async ({ log, msgHist }, message) => { log(`Message created \"${message.content}\" from ${message.author.tag}.`) // Hard-coded channel to test output there only, in our case "ollama-endpoint" @@ -19,7 +22,44 @@ export default event(Events.MessageCreate, ({ log, msgHist }, message) => { content: message.content }) - // Reply with something to prompt that ollama is working + const botMessage = new EmbedBuilder() + .setTitle(`Response to ${message.author.tag}`) + .setDescription('Generating Response . . 
.') + .setColor('#00FF00') + + const sentMessage = await message.channel.send({ embeds: [botMessage] }) + + const request = async () => { + try { + // change this when using an actual hosted server or use ollama.js + const response = await ollama.chat({ + model: 'llama2', + messages: msgHist, + stream: false + }) + + const embed = new EmbedBuilder() + .setTitle(`Response to ${message.author.tag}`) + .setDescription(response.message.content) + .setColor('#00FF00') + + sentMessage.edit({ embeds: [embed] }) + + // push bot response + msgHist.push({ + role: 'assistant', + content: response.message.content + }) + } catch (error) { + message.edit(error as string) + log(error) + } + } + + // Attempt to call ollama's endpoint + request() + + // Reply with something to prompt that ollama is working, version without embed message.reply("Generating Response . . .").then(sentMessage => { // Request made to API const request = async () => { @@ -30,19 +70,19 @@ export default event(Events.MessageCreate, ({ log, msgHist }, message) => { messages: msgHist, stream: false }) - log(response.data) sentMessage.edit(response.data.message.content) // push bot response - msgHist.push({ - role: 'assistant', - content: response.data.message.content - }) + // msgHist.push({ + // role: 'assistant', + // content: response.data.message.content + // }) } catch (error) { + message.edit(error as string) log(error) } - } + } // Attempt to call ollama's endpoint request() diff --git a/src/utils/streamParse.ts b/src/utils/streamParse.ts new file mode 100644 index 0000000..7ab5d8e --- /dev/null +++ b/src/utils/streamParse.ts @@ -0,0 +1,23 @@ +import { AxiosResponse } from "axios"; + +/** + * When running a /api/chat stream, the output needs to be parsed into an array of objects + * @param stream Axios response from Ollama + */ +export function parseStream(stream: AxiosResponse) { + // split string by newline + const keywordObjects: string[] = stream.data.trim().split('\n') + + // parse each string and 
load them into objects + const keywordsArray: { + model: string, + created_at: string, + message: { + role: string, + content: string + }, + done: boolean + }[] = keywordObjects.map((keywordString) => JSON.parse(keywordString)) + + return keywordsArray +} \ No newline at end of file