fix: update notification for response time issues not to show some suggestions when using local port. (#588)

* fix: update notification for response time issues not to show some suggestions when using local port.

* fix: update notification messages for completion response time issues.

* fix: lint.
r0.4
Zhiming Ma 2023-10-19 12:33:56 +08:00 committed by GitHub
parent aacfd35464
commit 0dc7e98232
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 44 additions and 30 deletions

View File

@ -8,6 +8,7 @@ import com.intellij.openapi.application.invokeLater
import com.intellij.openapi.components.service
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.ui.Messages
import com.tabbyml.intellijtabby.agent.Agent
import com.tabbyml.intellijtabby.agent.AgentService
import com.tabbyml.intellijtabby.settings.ApplicationSettingsState
import kotlinx.coroutines.launch
@ -23,20 +24,16 @@ class CheckIssueDetail : AnAction() {
agentService.scope.launch {
val detail = agentService.getCurrentIssueDetail() ?: return@launch
val serverHealthState = agentService.getServerHealthState()
val settingsState = service<ApplicationSettingsState>().state.value
logger.info("Show issue detail: $detail, $serverHealthState, $settingsState")
val agentConfig = agentService.getConfig()
logger.info("Show issue detail: $detail, $serverHealthState, $agentConfig")
val title = when (detail["name"]) {
"slowCompletionResponseTime" -> "Completion Requests Appear to Take Too Much Time"
"highCompletionTimeoutRate" -> "Most Completion Requests Timed Out"
else -> return@launch
}
val message = buildDetailMessage(detail, serverHealthState, settingsState)
val message = buildDetailMessage(detail, serverHealthState, agentConfig)
invokeLater {
val result =
Messages.showOkCancelDialog(message, title, "Supported Models", "Dismiss", Messages.getInformationIcon())
if (result == Messages.OK) {
BrowserUtil.browse("https://tabby.tabbyml.com/docs/models/")
}
Messages.showMessageDialog(message, title, Messages.getInformationIcon())
}
}
}
@ -44,7 +41,7 @@ class CheckIssueDetail : AnAction() {
private fun buildDetailMessage(
detail: Map<String, Any>,
serverHealthState: Map<String, Any>?,
settingsState: ApplicationSettingsState.State
agentConfig: Agent.Config
): String {
val stats = detail["completionResponseStats"] as Map<*, *>?
val statsMessages = when (detail["name"]) {
@ -72,19 +69,19 @@ class CheckIssueDetail : AnAction() {
val helpMessageForRunningLargeModelOnCPU = if (device == "cpu" && model.endsWith("B")) {
"""
Your Tabby server is running model <i>$model</i> on CPU.
This model is too large to run on CPU, please try a smaller model or switch to GPU.
You can find supported model list in online documents.
This model may be performing poorly due to its large parameter size, please consider trying smaller models or switch to GPU.
You can find a list of supported models in the <a href='https://tabby.tabbyml.com/docs/models/'>model directory</a>.
""".trimIndent()
} else {
""
}
var commonHelpMessage = ""
val host = URL(settingsState.serverEndpoint).host
val host = URL(agentConfig.server?.endpoint).host
if (helpMessageForRunningLargeModelOnCPU.isEmpty()) {
commonHelpMessage += "<li>The running model <i>$model</i> is too large to run on your Tabby server.<br/>"
commonHelpMessage += "Please try a smaller model. You can find supported model list in online documents.</li>"
commonHelpMessage += "<li>The running model <i>$model</i> may be performing poorly due to its large parameter size.<br/>"
commonHelpMessage += "Please consider trying smaller models. You can find a list of supported models in the <a href='https://tabby.tabbyml.com/docs/models/'>model directory</a>.</li>"
}
if (!(host == "localhost" || host == "127.0.0.1")) {
if (!(host.startsWith("localhost") || host.startsWith("127.0.0.1"))) {
commonHelpMessage += "<li>A poor network connection. Please check your network and proxy settings.</li>"
commonHelpMessage += "<li>Server overload. Please contact your Tabby server administrator for assistance.</li>"
}

View File

@ -202,6 +202,11 @@ class AgentService : Disposable {
agent.clearConfig(key)
}
// Returns the agent's current configuration.
// Suspends until the underlying agent has completed initialization
// (via waitForInitialized()), then delegates to agent.getConfig().
// NOTE(review): presumably Agent.Config carries the server endpoint used by
// CheckIssueDetail.buildDetailMessage — confirm against Agent.Config's definition.
suspend fun getConfig(): Agent.Config {
waitForInitialized()
return agent.getConfig()
}
suspend fun provideCompletion(editor: Editor, offset: Int, manually: Boolean = false): Agent.CompletionResponse? {
waitForInitialized()
return ReadAction.compute<PsiFile, Throwable> {

View File

@ -7,7 +7,7 @@
"repository": "https://github.com/TabbyML/tabby",
"bugs": "https://github.com/TabbyML/tabby/issues",
"license": "Apache-2.0",
"version": "0.6.1",
"version": "1.0.0-dev",
"keywords": [
"ai",
"autocomplete",

View File

@ -138,21 +138,33 @@ function getHelpMessageForCompletionResponseTimeIssue() {
if (serverHealthState?.device === "cpu" && serverHealthState?.model?.match(/[0-9\.]+B$/)) {
helpMessageForRunningLargeModelOnCPU +=
`Your Tabby server is running model ${serverHealthState?.model} on CPU. ` +
"This model is too large to run on CPU, please try a smaller model or switch to GPU. " +
"You can find supported model list in online documents. \n";
"This model may be performing poorly due to its large parameter size, please consider trying smaller models or switch to GPU. " +
"You can find a list of supported models in the model directory.\n";
}
let commonHelpMessage = "";
const host = new URL(agent().getConfig().server.endpoint).host;
if (helpMessageForRunningLargeModelOnCPU.length == 0) {
commonHelpMessage += ` - The running model ${
serverHealthState?.model ?? ""
} may be performing poorly due to its large parameter size. `;
commonHelpMessage +=
"Please consider trying smaller models. You can find a list of supported models in the model directory.\n";
}
if (!(host.startsWith("localhost") || host.startsWith("127.0.0.1"))) {
commonHelpMessage += " - A poor network connection. Please check your network and proxy settings.\n";
commonHelpMessage += " - Server overload. Please contact your Tabby server administrator for assistance.\n";
}
let message = "";
if (helpMessageForRunningLargeModelOnCPU.length > 0) {
message += helpMessageForRunningLargeModelOnCPU + "\n";
message += "Other possible causes of this issue are: \n";
if (commonHelpMessage.length > 0) {
message += "Other possible causes of this issue: \n";
message += commonHelpMessage;
}
} else {
message += "Possible causes of this issue are: \n";
}
message += " - A poor network connection. Please check your network and proxy settings.\n";
message += " - Server overload. Please contact your Tabby server administrator for assistance.\n";
if (helpMessageForRunningLargeModelOnCPU.length == 0) {
message += ` - The running model ${serverHealthState?.model ?? ""} is too large to run on your Tabby server. `;
message += "Please try a smaller model. You can find supported model list in online documents.\n";
// commonHelpMessage should not be empty here
message += "Possible causes of this issue: \n";
message += commonHelpMessage;
}
return message;
}
@ -173,11 +185,11 @@ function showInformationWhenSlowCompletionResponseTime(modal: boolean = false) {
modal: true,
detail: statsMessage + getHelpMessageForCompletionResponseTimeIssue(),
},
"Supported Models",
"Model Directory",
)
.then((selection) => {
switch (selection) {
case "Supported Models":
case "Model Directory":
env.openExternal(Uri.parse("https://tabby.tabbyml.com/docs/models/"));
break;
}
@ -212,11 +224,11 @@ function showInformationWhenHighCompletionTimeoutRate(modal: boolean = false) {
modal: true,
detail: statsMessage + getHelpMessageForCompletionResponseTimeIssue(),
},
"Supported Models",
"Model Directory",
)
.then((selection) => {
switch (selection) {
case "Supported Models":
case "Model Directory":
env.openExternal(Uri.parse("https://tabby.tabbyml.com/docs/models/"));
break;
}