// SPDX-FileCopyrightText: 2023 Loren Burkholder <computersemiexpert@outlook.com>
// SPDX-FileCopyrightText: 2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
//
// SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL

#pragma once

#include "kllmcore_export.h"

#include "KLLMReply.h"
#include "KLLMRequest.h"

class QNetworkAccessManager;

namespace KLLMCore
{
/**
 * @brief The KLLMInterface class provides an interface around the LLM API.
 *
 * KLLM is designed to feel similar to Qt Network. This means that all LLM requests must be routed through a central
 * KLLMInterface object.
 *
 * To request a message or completion from the LLM, first create a KLLMRequest object with the desired message. Choose the
 * model from models() that you wish to use for this request and set it on the KLLMRequest. Then call getCompletion(),
 * passing your KLLMRequest object. You will receive a KLLMReply object; connect to KLLMReply::contentAdded() if you wish to
 * receive content updates as they arrive from the server or connect to KLLMReply::finished() if you prefer to have the whole
 * message delivered at once.
 *
 * You should not request any completions (or otherwise use this class) until ready() returns true. Using the interface
 * before it is ready can cause problems; for example, the interface may not have loaded the available models yet or the
 * backend could be unreachable.
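 *
 * A minimal usage sketch of this workflow. It assumes the calling code lives in a QObject subclass and that KLLMRequest
 * exposes setModel() and setMessage() setters matching the steps described above; adjust to the actual KLLMRequest
 * interface.
 * @code
 * auto *llm = new KLLMCore::KLLMInterface(QStringLiteral("http://localhost:11434"), this);
 * connect(llm, &KLLMCore::KLLMInterface::readyChanged, this, [this, llm] {
 *     if (!llm->ready())
 *         return;
 *     KLLMCore::KLLMRequest request;           // assumed default-constructible
 *     request.setModel(llm->models().first()); // pick a model reported by the backend
 *     request.setMessage(QStringLiteral("Hello!"));
 *     auto *reply = llm->getCompletion(request);
 *     connect(reply, &KLLMCore::KLLMReply::finished, reply, &QObject::deleteLater); // no auto-delete yet
 * });
 * connect(llm, &KLLMCore::KLLMInterface::finished, this, [](const QString &replyText) {
 *     qDebug() << replyText; // the whole completion, delivered at once
 * });
 * @endcode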
 */
class KLLMCORE_EXPORT KLLMInterface : public QObject
{
    Q_OBJECT

    Q_PROPERTY(bool ready READ ready NOTIFY readyChanged FINAL)
    Q_PROPERTY(bool hasError READ hasError NOTIFY hasErrorChanged FINAL)
    Q_PROPERTY(QStringList models READ models NOTIFY modelsChanged FINAL)
    Q_PROPERTY(QString ollamaUrl READ ollamaUrl WRITE setOllamaUrl NOTIFY ollamaUrlChanged FINAL)
    Q_PROPERTY(QString systemPrompt READ systemPrompt WRITE setSystemPrompt NOTIFY systemPromptChanged FINAL)

public:
    /**
     * @brief Creates a KLLMInterface.
     * @param parent The parent QObject.
     */
    explicit KLLMInterface(QObject *parent = nullptr);

    /**
     * @brief Creates a KLLMInterface with the URL set to \a ollamaUrl.
     * @param ollamaUrl The URL to the Ollama instance.
     * @param parent The parent QObject.
     */
    explicit KLLMInterface(const QString &ollamaUrl, QObject *parent = nullptr);

    /**
     * @brief Creates a KLLMInterface with the URL set to \a ollamaUrl.
     * @param ollamaUrl The URL to the Ollama instance.
     * @param parent The parent QObject.
     */
    explicit KLLMInterface(const QUrl &ollamaUrl, QObject *parent = nullptr);

    /**
     * @brief Check whether the interface is ready.
     *
     * You should not use the interface until ready() returns true. Failure to observe this rule may result in undefined behavior.
     *
     * If the interface encounters an error, ready() will return false. However, do not use ready() to indicate to the user that the interface is in an error
     * state, as the interface could be in the process of making its initial connection. Instead, you should use hasError() to check for an error state.
     * Additionally, you should connect to errorOccurred() to handle errors as they arise.
     *
     * @return Returns whether the interface is ready.
     */
    [[nodiscard]] bool ready() const;

    /**
     * @brief Check whether the interface is in an error state.
     *
     * After you handle an error from errorOccurred(), you should monitor this property. When it becomes \c false, you can safely resume operations.
     *
     * @return Returns whether the interface is in an error state.
     */
    [[nodiscard]] bool hasError() const;

    /**
     * @brief Retrieve a list of models supported by the LLM backend.
     *
     * When creating a KLLMRequest, you should choose a model from this list for the request. If you do not specify a model,
     * the request will probably fail.
     *
     * @return Returns a QStringList containing all valid models for this interface.
     */
    [[nodiscard]] QStringList models() const;

    /**
     * @brief Get the URL to the Ollama instance.
     * @return The URL for the Ollama instance.
     */
    [[nodiscard]] QString ollamaUrl() const;

    /**
     * @brief Set the URL to the Ollama instance.
     *
     * Since Ollama is a self-hostable service, users may wish to use different instances. Use this function to set the URL to the desired instance. It should
     * \e not contain the \c /api portion of the URL.
     *
     * @param ollamaUrl The new URL for the Ollama instance.
     */
    void setOllamaUrl(const QString &ollamaUrl);

    /**
     * @brief A convenience overload of setOllamaUrl() that takes a QUrl.
     * @param ollamaUrl The new URL for the Ollama instance.
     */
    void setOllamaUrl(const QUrl &ollamaUrl);

    /**
     * @brief Get the system prompt for the LLM.
     * @return The system prompt string.
     */
    [[nodiscard]] QString systemPrompt() const;

    /**
     * @brief Set the system prompt for the LLM.
     *
     * LLMs can take system prompts that instruct them on how they should generally behave in a conversation. This could be anything from how they speak to what
     * types of information they prefer to present. You can set a system prompt here to better cater to your users.
     *
     * @param systemPrompt The system prompt for the LLM.
     */
    void setSystemPrompt(const QString &systemPrompt);

public Q_SLOTS:
    /**
     * @brief Request a completion from the LLM.
     *
     * Calling this function starts a request to the LLM backend. You should use the returned KLLMReply pointer to track the
     * status of the LLM's response. Once the KLLMReply emits KLLMReply::finished(), it is your responsibility to either
     * track or delete the KLLMReply; auto-deleting is not implemented yet.
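     *
     * A short lifetime sketch (assumes a ready interface pointer named \c llm and a prepared KLLMRequest named
     * \c request, as in the class documentation above):
     * @code
     * auto *reply = llm->getCompletion(request);
     * // Optional: react to streamed content as it arrives from the server.
     * connect(reply, &KLLMCore::KLLMReply::contentAdded, this, [] {
     *     // Query the reply for the text received so far, e.g. to update a view.
     * });
     * // The reply is not deleted automatically, so clean it up once it has finished.
     * connect(reply, &KLLMCore::KLLMReply::finished, reply, &QObject::deleteLater);
     * @endcode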
     *
     * @param request The request object that will be used to create the actual LLM request.
     * @return Returns a pointer to a KLLMReply that can be used to track the progress of the reply.
     */
    KLLMReply *getCompletion(const KLLMRequest &request);

    /**
     * @brief Reload the LLM interface.
     *
     * Reloading the interface can be used to check if a network error is gone or to see if the available models have changed.
     */
    void reload();

Q_SIGNALS:
    /**
     * @brief This signal is emitted when any completion requested by the interface is completed.
     * @param replyText Contains the text of the completion.
     */
    void finished(const QString &replyText);

    void readyChanged();
    void hasErrorChanged();
    void modelsChanged();
    void ollamaUrlChanged();
    void systemPromptChanged();

    /**
     * @brief An error occurred while communicating with the interface.
     * @param message Contains the human-readable error message.
     */
    void errorOccurred(const QString &message);

private:
    QNetworkAccessManager *const m_manager;
    QStringList m_models;
    bool m_ready = false;
    bool m_hasError = false;
    QString m_ollamaUrl;
    QString m_systemPrompt;
    QMetaObject::Connection m_ollamaCheck;
};
}