using System.Net.Http; using System.Net.Http.Json; using System.Text.Json; using System.Text; using DocumentFormat.OpenXml.Bibliography; using DocumentFormat.OpenXml.Wordprocessing; using BLAIzor.Models; using Newtonsoft.Json; using Microsoft.AspNetCore.Components.Routing; using Microsoft.AspNetCore.Components; using static Google.Apis.Requests.BatchRequest; using DocumentFormat.OpenXml.Spreadsheet; using Microsoft.AspNetCore.Razor.Language.Intermediate; using Microsoft.EntityFrameworkCore.Storage.ValueConversion.Internal; using NuGet.Packaging; using Microsoft.AspNetCore.Mvc.Formatters; using Google.Api; using System.CodeDom; using Microsoft.AspNetCore.Mvc; using System.Text.Json.Serialization; using BLAIzor.Services; using Microsoft.DotNet.Scaffolding.Shared; using System.Xml.XPath; using Azure.Identity; using BLAIzor.Helpers; namespace BLAIzor.Services { public class AIService { private readonly HttpClient _httpClient; private readonly ContentService _contentService; private readonly ScopedContentService _scopedContentService; private readonly OpenAIEmbeddingService _openAIEmbeddingService; private readonly OpenAIApiService _openAIApiService; private readonly OpenAiRealtimeService _openAIRealtimeService; private readonly DeepSeekApiService _deepSeekApiService; private readonly CerebrasAPIService _cerebrasAPIService; private readonly QDrantService _qDrantService; private readonly NavigationManager _navigationManager; public static IConfiguration? _configuration; public AIService(HttpClient httpClient, ContentService contentService, ScopedContentService scopedContentService, QDrantService qDrantService, OpenAIEmbeddingService openAIEmbeddingService, OpenAIApiService openAIApiService, DeepSeekApiService deepSeekApiService, OpenAiRealtimeService openAIRealtimeService, CerebrasAPIService cerebrasAPIService, NavigationManager navigationManager, IConfiguration? 
configuration) { _httpClient = httpClient; _contentService = contentService; _scopedContentService = scopedContentService; _qDrantService = qDrantService; _openAIEmbeddingService = openAIEmbeddingService; _openAIApiService = openAIApiService; _deepSeekApiService = deepSeekApiService; _openAIRealtimeService = openAIRealtimeService; _cerebrasAPIService = cerebrasAPIService; _navigationManager = navigationManager; _configuration = configuration; _openAIApiService.RegisterCallback(HandleActionInvoked, HandleFinishedInvoked, HandleErrorInvoked); _cerebrasAPIService.RegisterCallback(HandleActionInvoked, HandleFinishedInvoked, HandleErrorInvoked); _openAIRealtimeService.RegisterCallback(HandleActionInvoked); } private const string OpenAiEndpoint = "https://api.openai.com/v1/chat/completions"; public string _apiKey; public static event Action? OnContentReceived; public static event Action? OnContentReceiveFinished; public static event Action? OnContentReceivedError; public static event Action? OnStatusChangeReceived; public static event Action? OnTextContentAvailable; public string Mood = "cool, and professional"; private string _workingContent = null; public bool UseWebsocket = false; private string AiProvider = ""; private string GetAiSettings() => _configuration?.GetSection("AiSettings")?.GetValue("Provider") ?? 
string.Empty; private void HandleActionInvoked(string sessionId, string streamedHtmlContent) { OnContentReceived?.Invoke(sessionId, streamedHtmlContent); } private void HandleErrorInvoked(string sessionId, string streamedHtmlContent) { OnContentReceivedError?.Invoke(sessionId, streamedHtmlContent); } private void HandleFinishedInvoked(string sessionId) { OnContentReceiveFinished?.Invoke(sessionId); } public string GetApiKey() { if (_configuration == null) { return string.Empty; } if (_configuration.GetSection("OpenAI") == null) { return string.Empty; } return _configuration.GetSection("OpenAI").GetValue("ApiKey")!; } public async Task GetChatGptWelcomeMessage(string sessionId, int SiteId, string menuList = "") { string currentUri = _navigationManager.Uri; //Console.Write($"\n\n SessionId: {sessionId}\n\n"); //string rootpath = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "wwwroot/Documents/" + _contentService.SelectedDocument); _apiKey = GetApiKey(); string qdrantPoint = await _qDrantService.GetContentAsync(SiteId, 0); string extractedText = ""; //TODO: this is the full object, should get the text from it, it sends the vectors too at the moment var selectedPoint = JsonConvert.DeserializeObject(qdrantPoint)!; if (selectedPoint != null) { extractedText = selectedPoint.result.payload.content; ////Console.Write($"\n -------------------------------- Found point: {selectedPoint.result.payload.content} \n"); //Console.Write($"\n -------------------------------- Found point: {selectedPoint.result.id} \n"); } SiteInfo site = await _scopedContentService.GetSiteInfoByIdAsync(SiteId); string siteEntity; if (!string.IsNullOrWhiteSpace(site.Entity)) { siteEntity = site.Entity; } else { siteEntity = "brand or company"; } string systemMessage = "You are a helpful, " + Mood + " assistant that welcomes the user speaking in the name of the " + siteEntity + " described by the content, on a website of " + _scopedContentService.SelectedBrandName + " in " + 
_scopedContentService.SelectedLanguage + ". Use the following content: `" + extractedText + "` " + //"and generate a short" +Mood+ "but kind marketing-oriented welcome message and introduction of the brand for the user, constructed as simple Bootstrap HTML codeblock with a

tagged title and a paragraph." + "and generate a" + Mood + " marketing-oriented welcome message and a summary of the content and introduction of the brand for the user, aiming to explain clearly, what does the company/person offer, constructed as simple Bootstrap HTML
codeblock with a

tagged title and a paragraph." + "If there is any logo, or not logo but main brand image in the document use that url, and add that as a bootstrap responsive ('img-fluid py-3') image, with the maximum height of 30vh." + //"In the end of your answer, always add in a new row: 'If you have any questions, you can simply ask either by typing in the message box, or by clicking the microphone icon on the top of the page. '"+ "Here is a list of topics " + menuList + ", make a new bootstrap clearfix and after that make a clickable bootstrap styled (btn btn-primary) button from each of the determined topics, " + "that calls the javascript function 'callAI({the name of the topic})' on click. " + "Do not include anything else than the html title and text elements, no css, no scripts, no head or other tags." + "Do not mark your answer with ```html or any other mark."; string userMessage = "Hello"; string streamedHtmlContent = string.Empty; if (!UseWebsocket) { AiProvider = GetAiSettings(); if (AiProvider == "cerebras") { await _cerebrasAPIService.GetCerebrasStreamedResponse(sessionId, systemMessage, userMessage); } else if (AiProvider == "chatgpt") { await _openAIApiService.GetChatGPTStreamedResponse(sessionId, systemMessage, userMessage); } else if (AiProvider == "deepseek") { //await _deepSeekApiService.GetChatGPTStreamedResponse(systemMessage, userMessage); } } else { await _openAIRealtimeService.GetChatGPTResponseAsync(sessionId, systemMessage, userMessage); } //_scopedContentService.CurrentDOM = streamedHtmlContent; ////Console.Write("Answer: " + streamedHtmlContent); //return streamedHtmlContent; } public async Task ProcessUserIntent(string sessionId, string userPrompt, int siteId, int templateId, string collectionName, string menuList = "") { //Console.WriteLine($"SITE ID: {siteId}"); OnStatusChangeReceived?.Invoke(sessionId, "Understanding your request..."); // Get JSON result based on siteId presence string resultJson = siteId >= 0 ? 
await GetJsonResultFromQuery(sessionId, siteId, userPrompt) : await GetJsonResultFromQuery(sessionId, userPrompt); //Console.WriteLine(resultJson); var baseResult = await ValidateAndFixJson(resultJson, FixJsonWithAI); //var baseResult = System.Text.Json.JsonSerializer.Deserialize(resultJson, new JsonSerializerOptions { PropertyNameCaseInsensitive = true }); var fixedResult = System.Text.Json.JsonSerializer.Serialize(baseResult); if (baseResult == null) { //Console.WriteLine("Invalid JSON response."); return; } OnStatusChangeReceived?.Invoke(sessionId, "Making a decision"); // Process result based on type switch (baseResult.Type.ToLower()) { case "methodresult": await ProcessMethodResult(sessionId, resultJson); break; case "textresult": await ProcessTextResult(sessionId, fixedResult, templateId, collectionName); break; case "examinationresult": await ProcessExaminationResult(sessionId, fixedResult, templateId, collectionName); break; case "errorresult": await ProcessErrorResult(sessionId, fixedResult); break; default: //Console.WriteLine("Unknown result type."); break; } } /// /// No reasoning needed just content retrieved and displayed as html /// /// /// /// /// /// /// /// public async Task ProcessContentRequest(string sessionId, MenuItem requestedMenu, int siteId, int templateId, string collectionName, string menuList = "", bool forceUnmodified = false) { //Console.Write($"\n\n SessionId: {sessionId}\n\n"); //string rootpath = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "wwwroot/Documents/" + _contentService.SelectedDocument); string extractedText = ""; if (requestedMenu != null) { string qDrantData = await _qDrantService.GetContentAsync(siteId, requestedMenu.PointId); QDrantGetContentPointResult _selectedPoint = new QDrantGetContentPointResult(); if (qDrantData != null) { _selectedPoint = JsonConvert.DeserializeObject(qDrantData)!; } extractedText = _selectedPoint.result.payload.name + ": " + _selectedPoint.result.payload.content + ", "; } 
string contentJson = await GetContentFromQuery(sessionId, "Enhance this text if needed, making its style and grammar suitable to be displayed as the content of a webpage", extractedText, forceUnmodified); await ProcessContent(sessionId, contentJson, templateId, collectionName); } public async Task ProcessContentRequest(string sessionId, string requestedMenu, int siteId, int templateId, string collectionName, string menuList = "", bool forceUnmodified = false) { //Console.Write($"\n\n SessionId: {sessionId}\n\n"); //string rootpath = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "wwwroot/Documents/" + _contentService.SelectedDocument); string extractedText = ""; float[] vector = []; OnStatusChangeReceived?.Invoke(sessionId, "Determining search vectors"); vector = await _openAIEmbeddingService.GenerateEmbeddingAsync(requestedMenu); OnStatusChangeReceived?.Invoke(sessionId, "Looking up content in the knowledge database"); var pointId = await _qDrantService.QueryContentAsync(siteId, vector, 3); if (pointId.Length > 0) { foreach (var item in pointId) { string qDrantData = await _qDrantService.GetContentAsync(siteId, item); QDrantGetContentPointResult _selectedPoint = new QDrantGetContentPointResult(); if (qDrantData != null) { _selectedPoint = JsonConvert.DeserializeObject(qDrantData)!; } extractedText += _selectedPoint.result.payload.name + ": " + _selectedPoint.result.payload.content + ", "; } } else { extractedText = "VECTOR ERROR: ZERO INFORMATION FOUND"; } string contentJson = await GetContentFromQuery(sessionId, "Enhance this text if needed, making its style and grammar suitable to be displayed as the content of a webpage", extractedText, forceUnmodified); await ProcessContent(sessionId, contentJson, templateId, collectionName); } // Refactored helper methods private async Task ProcessMethodResult(string sessionId, string resultJson) { var fixedResult = await ValidateAndFixJson(resultJson, FixJsonWithAI); //var methodResult = 
System.Text.Json.JsonSerializer.Deserialize(resultJson, new JsonSerializerOptions { PropertyNameCaseInsensitive = true }); if (fixedResult != null) { OnStatusChangeReceived?.Invoke(sessionId, "Initiating the task you requested"); await DisplayHtml(sessionId, fixedResult.Text, fixedResult.MethodToCall, fixedResult.Parameter); } } private async Task ProcessTextResult(string sessionId, string resultJson, int templateId, string collectionName) { var fixedResult = await ValidateAndFixJson(resultJson, FixJsonWithAI); //var textResult = System.Text.Json.JsonSerializer.Deserialize(resultJson, new JsonSerializerOptions { PropertyNameCaseInsensitive = true }); if (fixedResult != null) { string contentJson = await GetContentFromQuery(sessionId, fixedResult.Text, _workingContent); //Console.Write("\r \n ProcessTextResult: Content: " + contentJson + "\r \n"); await ProcessContent(sessionId, contentJson, templateId, collectionName); } } public async Task ValidateAndFixJson(string json, Func> aiFixer) { try { return System.Text.Json.JsonSerializer.Deserialize(json, new JsonSerializerOptions { PropertyNameCaseInsensitive = true }); } catch (Exception ex) { //Console.WriteLine($"❌ JSON parse failed: {ex.Message}"); var prompt = BuildJsonFixPrompt(json, ex.Message, typeof(T).Name); var fixedJson = await aiFixer(prompt); try { return System.Text.Json.JsonSerializer.Deserialize(fixedJson, new JsonSerializerOptions { PropertyNameCaseInsensitive = true }); } catch (Exception ex2) { //Console.WriteLine($"❌ AI-fix parse failed: {ex2.Message}"); return default; } } } public async Task FixJsonWithAI(string prompt) { AiProvider = GetAiSettings(); if (AiProvider == "cerebras") { return await _cerebrasAPIService.GetSimpleCerebrasResponseNoSession("You are a JSON-fixing assistant.", prompt); } else if (AiProvider == "chatgpt") { return await _openAIApiService.GetSimpleChatGPTResponseNoSession("You are a JSON-fixing assistant.", prompt); } else if (AiProvider == "deepseek") { return await 
_deepSeekApiService.GetSimpleChatGPTResponse("You are a JSON-fixing assistant.", prompt); } else { return ""; } //return await _openAIApiService.GetSimpleChatGPTResponseNoSession("You are a JSON-fixing assistant.", prompt); } private string BuildJsonFixPrompt(string json, string errorMessage, string targetTypeName) { return $""" The following JSON was supposed to be parsed into an object of type {targetTypeName}, but it failed with this error: {errorMessage} Please fix the formatting of the JSON so that it becomes valid and deserializable: --- JSON START --- {json} --- JSON END --- Only return the fixed JSON, with no explanation or formatting like ```json. """; } private async Task ProcessExaminationResult(string sessionId, string resultJson, int templateId, string collectionName) { var fixedResult = await ValidateAndFixJson(resultJson, FixJsonWithAI); //var explanationResult = System.Text.Json.JsonSerializer.Deserialize(resultJson, new JsonSerializerOptions { PropertyNameCaseInsensitive = true }); if (fixedResult != null) { string contentJson = await GetExplanationFromQuery(sessionId, fixedResult.Text, _scopedContentService.CurrentDOM); await ProcessContent(sessionId, contentJson, templateId, collectionName); } } private async Task ProcessContent(string sessionId, string contentJson, int templateId, string collectionName) { try { var fixedResult = await ValidateAndFixJson(contentJson, FixJsonWithAI); //var contentResult = System.Text.Json.JsonSerializer.Deserialize(contentJson, new JsonSerializerOptions { PropertyNameCaseInsensitive = true }); if (fixedResult != null) { Console.WriteLine($"\n\n Actual content: {fixedResult.Text} \n\n"); // Add reaction GIFs //contentResult.Photos.Add("Clarification request", "https://www.reactiongifs.com/r/martin.gif"); //contentResult.Photos.Add("Compliment response", "https://www.reactiongifs.com/r/review.gif"); //We have the text all available now, let's pass it to the voice generator //TODO modify photos handling, move audio 
generation to the layout area string removedNumbers = TextHelper.ReplaceNumbersAndSpecialCharacters(fixedResult.Text); Console.WriteLine(removedNumbers); OnTextContentAvailable?.Invoke(sessionId, removedNumbers); List snippets = await GetSnippetsForDisplay(sessionId, collectionName); //await DisplayLayoutPlanFromContent(sessionId, fixedResult.Text, snippets, fixedResult.Topics, fixedResult.Photos); var result = await DisplayLayoutPlanFromContent(sessionId, fixedResult.Text, snippets, fixedResult.Topics, fixedResult.Photos); if (result == null) result = new LayoutPlan(); await DisplayHtml(sessionId, result, snippets, fixedResult.Topics); //OnContentReceived?.Invoke(sessionId, result.Blocks.Count.ToString()); } } catch (Exception ex) { //Console.WriteLine($"Error processing content: {ex.Message}"); OnContentReceived?.Invoke(sessionId, ex.Message); } } private async Task ProcessErrorResult(string sessionId, string resultJson) { var errorResult = System.Text.Json.JsonSerializer.Deserialize(resultJson); if (errorResult != null) { //Console.WriteLine($"Error Result: {errorResult.Text}"); await DisplayLayoutPlanFromContent(sessionId, errorResult.Text, null, null, null); } } /// /// Let's get the actual content /// /// /// public async Task GetContentFromQuery(string sessionId, string userPrompt, string content = null, bool forceUnmodified = false) { string extractedText; if (content == null) { string rootpath = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "wwwroot/Documents/" + _scopedContentService.SelectedDocument); extractedText = WordFileReader.ExtractText(rootpath); } else extractedText = content; _apiKey = GetApiKey(); ////Console.Write("GetJSONResult called: " +extractedText); string systemMessage = ""; if (forceUnmodified) { systemMessage = "You are a helpful assistant of a website. Display the Content strictly in " + _scopedContentService.SelectedLanguage + " with a plain JSON object in the following format:\r\n\r\n1. 
" + //"**chatGPTContentResult**:\r\n " + "- `type`: A string with value `contentresult`.\r\n " + "- `text`: A string with the actual response.\r\n " + "- `topics`: A list of sections of the initial document." + "- `photos`: A dictionary of string key and string values, where the keys are the name of the subject that the photo is related to (like a person's name, or a section)," + " and the value is the actual, unmodified photo url.\r\n" + "**Document-Specific Instructions**:\r\n" + "Step 1: Start with defining above mentioned key topics of the initial document, and making the list of them. " + "Step 2: After that add the above mentioned relevant image urls list." + "Step 3: " + "- Turn the following content into a nice informative webpage content (DO NOT REMOVE URLS, PHOTO URLS though).\r\n " + "- Start with the page title.\r\n" + "- Structure it nicely without leaving out any information.\r\n " + //"*** CONTENT START *** {" + extractedText + "} *** CONTENT END ***.\r\n" + "**Style and Image Handling**:\r\n" + "- Make sure the json is valid json." + "- Do NOT include extraneous text outside the JSON structure.\r\n\r\n" + "When you understand the input, follow these rules strictly. Otherwise, seek clarification.\r\n" + "Do not include linebreaks or any formatting, just the plain json string. Make sure it is valid json, and every objects is closed properly" + "Do NOT mark your answer with anything like `````json, and do not add any explanation."; userPrompt = "Give me a formatted json from this content, without modifying the text: " + extractedText; } else { systemMessage = "You are a helpful assistant of a website. Respond in the name of the brand or person in the content, strictly in " + _scopedContentService.SelectedLanguage + " with a plain JSON object in the following format:\r\n\r\n1. 
" + //"**chatGPTContentResult**:\r\n " + "- `type`: A string with value `contentresult`.\r\n " + "- `text`: A string with the actual response.\r\n " + "- `topics`: A list of sections of the initial document." + "- `photos`: A dictionary of string key and string values, where the keys are the name of the subject that the photo is related to (like a person's name, or a section)," + " and the value is the actual, unmodified photo url.\r\n" + "**Document-Specific Instructions**:\r\n" + "Step 1: Start with defining above mentioned key topics of the initial document, and making the list of them. " + "Step 2: After that add the above mentioned relevant image urls list." + "Step 3: " + "- Base a detailed, but not lengthy response solely on the initial document provided below. " + "- In your response, summarize ALL relevant information in the document, that is connected to the question." + "*** CONTENT START *** {" + extractedText + "} *** CONTENT END ***.\r\n" + "- For missing information: Inform the user and ask if you can help with something else. " + //"- Do not generate lengthy answers." + "- If the user prompt is clear and they ask specific, well defined question, do not add other infromation or welcome message." + "- If the user prompt is unclear, or makes no sense, ask for clarification." + //"You can decorate your clarification" + //"request with this image URL: `https://www.reactiongifs.com/r/martin.gif` added to the photo dictionary.\r\n\r\n" + //"- For compliments from the user: Express our happiness about it " + //"and apply this image URL: `https://www.reactiongifs.com/r/review.gif` in the photo dictionary.\r\n\r\n" + "**Style and Image Handling**:\r\n" + "- Make sure the json is valid json." + "- Do NOT include extraneous text outside the JSON structure.\r\n\r\n" + "When you understand the input, follow these rules strictly. Otherwise, seek clarification.\r\n" + "Do not include linebreaks or any formatting, just the plain json string. 
Make sure it is valid json, and every objects is closed properly" + "Do NOT mark your answer with anything like `````json, and do not add any explanation."; } OnStatusChangeReceived?.Invoke(sessionId, "Constructing the answer"); string interMediateResult = string.Empty; if (!UseWebsocket) { AiProvider = GetAiSettings(); if (AiProvider == "cerebras") { interMediateResult = await _cerebrasAPIService.GetSimpleCerebrasResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "chatgpt") { interMediateResult = await _openAIApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "deepseek") { interMediateResult = await _deepSeekApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } } else { interMediateResult = await _openAIRealtimeService.GetFullChatGPTResponseAsync(sessionId, systemMessage, userPrompt); } OnStatusChangeReceived?.Invoke(sessionId, "Mkay, I know now"); //Console.Write("GetContentFromQuery: Result decision: " + interMediateResult); return interMediateResult; } public async Task GetExplanationFromQuery(string sessionId, string userPrompt, string content = null) { string extractedText; if (content == null) { string rootpath = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "wwwroot/Documents/" + _scopedContentService.SelectedDocument); extractedText = WordFileReader.ExtractText(rootpath); } else extractedText = content; _apiKey = GetApiKey(); ////Console.Write("GetJSONResult called: " +extractedText); var systemMessage = "You are a helpful assistant. Respond strictly in " + _scopedContentService.SelectedLanguage + " as a JSON object in the following format:\r\n\r\n1. " + //"**chatGPTContentResult**:\r\n " + "- `type`: A string with value `contentresult`.\r\n " + "- `text`: A string with the actual response.\r\n " + "- `topics`: A list of sections of the initial document." 
+ "- `photos`: A dictionary of string key and string values, where the keys are the name of the subject that the photo is related to (like a person's name, or a section)," + " and the value is the actual, unmodified photo url.\r\n" + "**Document-Specific Instructions**:\r\n- Base responses solely on the initial document: {" + extractedText + "}.\r\n" + "- For missing information: Inform the user and provide a clarification. " + "- If the user prompt is clear and they ask specific, well defined question, do not add other infromation or welcome message." + "- If the user prompt is unclear, or makes no sense, ask for clarification. " + "You may decorate your clarification" + "request with this image URL: `https://www.reactiongifs.com/r/martin.gif` added to the photo dictionary.\r\n\r\n" + "- For compliments from the user: Express our happiness about it " + "and apply this image URL: `https://www.reactiongifs.com/r/review.gif` in the photo dictionary.\r\n\r\n" + "**Style and Image Handling**:\r\n" + //"- Copy styles explicitly from the document into the response.\r\n" + //"- Only use image URLs found in the document for relevant content.\r\n" + "- Do NOT include extraneous text outside the JSON structure.\r\n\r\n" + "When you understand the input, follow these rules strictly. Otherwise, seek clarification.\r\n" + "Do not include linebreaks or any formatting, just the plain json string. 
Make sure it is valid json, and every objects is closed properly" + "Do NOT mark your answer with anything like `````json or such."; OnStatusChangeReceived?.Invoke(sessionId, "Constructing the answer"); string interMediateResult = string.Empty; if (!UseWebsocket) { AiProvider = GetAiSettings(); if (AiProvider == "cerebras") { interMediateResult = await _cerebrasAPIService.GetSimpleCerebrasResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "chatgpt") { interMediateResult = await _openAIApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "deepseek") { interMediateResult = await _deepSeekApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } } else { interMediateResult = await _openAIRealtimeService.GetFullChatGPTResponseAsync(sessionId, systemMessage, userPrompt); } OnStatusChangeReceived?.Invoke(sessionId, "Mkay, I know now"); //Console.Write("GetExaminationResult: Result decision: " + interMediateResult); return interMediateResult; } /// /// What does the user want? Answer or action? /// /// /// public async Task GetJsonResultFromQuery(string sessionId, string userPrompt) { string rootpath = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "wwwroot/Documents/" + _scopedContentService.SelectedDocument); _apiKey = GetApiKey(); string extractedText = WordFileReader.ExtractText(rootpath); //Console.Write("GetJSONResult called!"); var systemMessage = "You are a helpful assistant. Respond strictly in " + _scopedContentService.SelectedLanguage + " as a JSON object in the following formats:\r\n\r\n1. " + "1. 
MethodResult:\r\n " + "- `type`: A string with value `methodresult`.\r\n " + "- `text`: A string explaining the result.\r\n " + "- `methodToCall`: One of these values: " + "[openContactForm, openCalendar, openApplicationForm].\r\n" + "- `parameter`: One of these: \r\n" + "[email address for openContactForm, calendlyUserName for openCalendar, empty string for openApplicationForm]" + "2. TextResult:\r\n " + "- `type`: A string with value `textresult`.\r\n " + "- `text`: Contains the user query without any modification.\r\n " + "3. ExaminationResult:\r\n " + "- `type`: A string with value `examinationresult`.\r\n " + "- `text`: Contains the user query without any modification.\r\n " + "4. ErrorResult:\r\n " + "- `type`: A string with value `errorresult`. \r\n " + "- `text`: The description of the problem you found. " + "**Document-Specific Instructions**:\r\n- Base responses solely on the following initial document: {" + extractedText + "}.\r\n" + "**Rules for Decision Making**:\r\n" + "- If the user’s input indicates a method invocation, and you find the relevant parameter in the initial document, generate a `methodresult`.\r\n" + " In the explanation, put a short sentence about what the user has requested by your understanding. \r\n" + "- If the user asks about the current content displayed for them, generate an examinationResult. \r\n" + "- If you don't find the relevant parameter in the initial document, generate an errorResult. \r\n" + //"- If the user asks for contact form but the initial document doesn't contain contact email, generate an errorResult. 
\r\n"+ "- Otherwise, create a `textresult` with the unmoddified user query.\r\n\r\n" + "Do NOT mark your answer with anything like `````json or such."; string interMediateResult = string.Empty; if (!UseWebsocket) { AiProvider = GetAiSettings(); if (AiProvider == "cerebras") { interMediateResult = await _cerebrasAPIService.GetSimpleCerebrasResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "chatgpt") { interMediateResult = await _openAIApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "deepseek") { interMediateResult = await _deepSeekApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } } else { interMediateResult = await _openAIRealtimeService.GetFullChatGPTResponseAsync(sessionId, systemMessage, userPrompt); } //Console.Write("Result decision: " + interMediateResult); return interMediateResult; } public async Task GetJsonResultFromQuery(string sessionId, int siteId, string userPrompt) { //string rootpath = System.IO.Path.Combine(System.IO.Directory.GetCurrentDirectory(), "wwwroot/Documents/" + _contentService.SelectedDocument); //_apiKey = GetApiKey(); //start with embeddings float[] vector = []; OnStatusChangeReceived?.Invoke(sessionId, "Determining search vectors"); vector = await _openAIEmbeddingService.GenerateEmbeddingAsync(userPrompt); OnStatusChangeReceived?.Invoke(sessionId, "Looking up content in the knowledge database"); var pointId = await _qDrantService.QueryContentAsync(siteId, vector, 3); string extractedText = "Sections: "; if (pointId.Length > 0) { foreach (var item in pointId) { string qDrantData = await _qDrantService.GetContentAsync(siteId, item); QDrantGetContentPointResult selectedPoint = new QDrantGetContentPointResult(); if (qDrantData != null) { selectedPoint = JsonConvert.DeserializeObject(qDrantData)!; } extractedText += selectedPoint.result.payload.name + ": " + selectedPoint.result.payload.content + ", "; } } else { extractedText = "VECTOR 
ERROR: ZERO INFORMATION FOUND"; } _workingContent = extractedText.Replace("\"", "'"); //Console.Write("\r \n GetJsonResultFromQuery: Working content: " + _workingContent + "\r \n"); //string extractedText = WordFileReader.ExtractText(rootpath); //Console.Write("GetJSONResult called!"); string systemMessage = $"You are a helpful assistant built in a website, trying to figure out what the User wants to do or know about.\r\n" + "Your job is to classify the user's request into one of the following categories:\r\n" + "1. **Ask about or search infromation in the website’s content** (Return a 'Text result')\r\n" + "2. **Analyze the currently displayed HTML content** (Return an 'Examination result')\r\n" + "3. **Initiate an action** (Return a 'Method result')\r\n" + "If none of the above applies, return an 'Error result'.\r\n\r\n" + "**Response format:**\r\n" + "Strictly respond in " + _scopedContentService.SelectedLanguage + " as a JSON object, using one of the following formats:\r\n" + "1. **chatGPTMethodResult** (for initiating actions):\r\n" + " - `type`: \"methodresult\"\r\n" + " - `text`: A short explanation of what the user wants to do.\r\n" + " - `methodToCall`: One of: [openContactForm, openCalendar, openApplicationForm]\r\n" + " - `parameter`: [email address for openContactForm, calendlyUserName for openCalendar, empty string for openApplicationForm]\r\n\r\n" + "2. **chatGPTTextResult** (for general website content searches):\r\n" + " - `type`: \"textresult\"\r\n" + " - `text`: The user’s unmodified query.\r\n\r\n" + "3. **chatGPTExaminationResult** (for analyzing the currently displayed page only):\r\n" + " - `type`: \"examinationresult\"\r\n" + " - `text`: The user’s unmodified query.\r\n\r\n" + "4. 
**chatGPTErrorResult** (for errors):\r\n" + " - `type`: \"errorresult\"\r\n" + " - `text`: A description of the issue encountered.\r\n\r\n" + "**Decision Rules:**\r\n" + "- If the user is **searching for website content** beyond what is currently displayed (e.g., 'Find information about our services'), return a `textresult`.\r\n" + "- If the user is **asking about the currently visible content** (e.g., 'What is shown on the page?'), return an `examinationresult`.\r\n" + "- If the user wants to **perform an action**, return a `methodresult`.\r\n" + "- If the required parameter is missing, return an `errorresult`.\r\n\r\n" + "**Examples:**\r\n" + "- User asks: 'Show me information about pricing' → `textresult`\r\n" + "- User asks: 'What is displayed right now?' → `examinationresult`\r\n" + "- User asks: 'Open the contact form' → `methodresult`\r\n" + "- User asks: 'Contact support' but no email is found → `errorresult`\r\n\r\n" + "**Context:**\r\n" + "- Base responses on this initial document: {" + extractedText + "}\r\n" + "- Current displayed HTML: {" + _scopedContentService.CurrentDOM + "}\r\n" + "**IMPORTANT:**\r\n" + "- If the request is about general content, **DO NOT use 'examinationresult'**.\r\n" + "- If the request is about the currently displayed page, **DO NOT use 'textresult'**.\r\n" + "- Do NOT format the response with markdown, code blocks, or `json` tags, do not add any title, or explanation besides the plain json object"; string interMediateResult = string.Empty; if (!UseWebsocket) { AiProvider = GetAiSettings(); if (AiProvider == "cerebras") { interMediateResult = await _cerebrasAPIService.GetSimpleCerebrasResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "chatgpt") { interMediateResult = await _openAIApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } else if (AiProvider == "deepseek") { interMediateResult = await _deepSeekApiService.GetSimpleChatGPTResponse(sessionId, systemMessage, userPrompt); } } 
else
{
    interMediateResult = await _openAIRealtimeService.GetFullChatGPTResponseAsync(sessionId, systemMessage, userPrompt);
}
//Console.Write("\r \n GetJsonResultFromQuery: Result decision: " + interMediateResult + "\r \n");
return interMediateResult;
}

/// <summary>
/// Loads every HTML snippet stored in the given Qdrant collection and returns them as a list,
/// emitting UI status notifications before and after the fetch.
/// </summary>
/// <param name="sessionId">Chat session id; used only to route <see cref="OnStatusChangeReceived"/> notifications.</param>
/// <param name="collectionName">Name of the Qdrant collection holding the snippet points.</param>
/// <returns>All snippets found in the collection; an empty list when the collection is empty.</returns>
public async Task<List<HtmlSnippet>> GetSnippetsForDisplay(string sessionId, string collectionName)
{
    _apiKey = GetApiKey();
    OnStatusChangeReceived?.Invoke(sessionId, "Looking up the UI template elements for you");

    List<HtmlSnippet> snippets = new List<HtmlSnippet>();
    var snippetscount = await _qDrantService.GetCollectionCount(collectionName);

    // NOTE(review): assumes point ids are contiguous and 1-based (1..count) — confirm that
    // deletions in the collection cannot leave gaps, otherwise GetSnippetAsync misses points.
    for (int j = 1; j <= snippetscount; j++)
    {
        var snippet = await _qDrantService.GetSnippetAsync(j, collectionName);
        QDrantGetPointResult x = JsonConvert.DeserializeObject<QDrantGetPointResult>(snippet);

        // Skip points that fail to deserialize or carry no payload instead of
        // throwing a NullReferenceException on x.result.payload below.
        if (x?.result?.payload == null)
        {
            continue;
        }

        snippets.Add(new HtmlSnippet
        {
            Id = x.result.payload.Id,
            Name = x.result.payload.Name,
            Description = x.result.payload.Description,
            Type = x.result.payload.Type,
            Variant = x.result.payload.Variant,
            Tags = x.result.payload.Tags,
            Slots = x.result.payload.Slots,
            Html = x.result.payload.Html,
            SampleHtml = x.result.payload.SampleHtml
        });
    }

    OnStatusChangeReceived?.Invoke(sessionId, "Loading UI elements from the design database");
    return snippets;
}

public async Task DisplayHtml(string sessionId, LayoutPlan layoutPlan, List htmlToUse, string[]? topics = null)
{
    //for textresult and errorresult
    //Console.Write($"\n SessionId: {sessionId} \n");
    OnStatusChangeReceived?.Invoke(sessionId, "Casting spells to draw customized UI");
    //Console.WriteLine($"DISPLAYHTML Snippets: {htmlToUse.Count}\n\n");
    //Console.WriteLine($"DISPLAYHTML Topics: {topics} \n\n");
    StringBuilder lst = new StringBuilder("You are a helpful assistant generating HTML content in " + _scopedContentService.SelectedLanguage + " using Bootstrap. 
\n\n" + "### Rules to Follow: \n" + "- Please generate clean and structured HTML that goes between the menu and the footer of a Bootstrap5 webpage.\n" + "- DO NOT include ``, ``, or `