Mirror of https://gitee.com/dify_ai/dify.git (synced 2025-12-06 11:29:30 +08:00)
feat(stress-test): add comprehensive stress testing suite using Locust (#25617)
Signed-off-by: -LAN- <laipz8200@outlook.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
.gitignore (vendored): 4 lines changed
```diff
@@ -227,3 +227,7 @@ web/public/fallback-*.js
 .roo/
 api/.env.backup
 /clickzetta
+
+# Benchmark
+scripts/stress-test/setup/config/
+scripts/stress-test/reports/
```
api/core/helper/position_helper.py

```diff
@@ -1,12 +1,14 @@
 import os
 from collections import OrderedDict
 from collections.abc import Callable
+from functools import lru_cache
 from typing import TypeVar

 from configs import dify_config
-from core.tools.utils.yaml_utils import load_yaml_file
+from core.tools.utils.yaml_utils import load_yaml_file_cached


+@lru_cache(maxsize=128)
 def get_position_map(folder_path: str, *, file_name: str = "_position.yaml") -> dict[str, int]:
     """
     Get the mapping from name to index from a YAML file
@@ -14,12 +16,17 @@ def get_position_map(folder_path: str, *, file_name: str = "_position.yaml") ->
     :param file_name: the YAML file name, default to '_position.yaml'
     :return: a dict with name as key and index as value
     """
+    # FIXME(-LAN-): Cache position maps to prevent file descriptor exhaustion during high-load benchmarks
     position_file_path = os.path.join(folder_path, file_name)
-    yaml_content = load_yaml_file(file_path=position_file_path, default_value=[])
+    try:
+        yaml_content = load_yaml_file_cached(file_path=position_file_path)
+    except Exception:
+        yaml_content = []
     positions = [item.strip() for item in yaml_content if item and isinstance(item, str) and item.strip()]
     return {name: index for index, name in enumerate(positions)}


+@lru_cache(maxsize=128)
 def get_tool_position_map(folder_path: str, file_name: str = "_position.yaml") -> dict[str, int]:
     """
     Get the mapping for tools from name to index from a YAML file.
@@ -35,20 +42,6 @@ def get_tool_position_map(folder_path: str, file_name: str = "_position.yaml") -
     )
-
-
-def get_provider_position_map(folder_path: str, file_name: str = "_position.yaml") -> dict[str, int]:
-    """
-    Get the mapping for providers from name to index from a YAML file.
-    :param folder_path:
-    :param file_name: the YAML file name, default to '_position.yaml'
-    :return: a dict with name as key and index as value
-    """
-    position_map = get_position_map(folder_path, file_name=file_name)
-    return pin_position_map(
-        position_map,
-        pin_list=dify_config.POSITION_PROVIDER_PINS_LIST,
-    )


 def pin_position_map(original_position_map: dict[str, int], pin_list: list[str]) -> dict[str, int]:
     """
     Pin the items in the pin list to the beginning of the position map.
```
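For context, the change above keys an `lru_cache` on the folder path so each `_position.yaml` is parsed once per process instead of once per request. A minimal sketch of the same pattern (illustrative names only, not the project's code):

```python
from functools import lru_cache


@lru_cache(maxsize=128)
def load_positions(path: str) -> dict[str, int]:
    # Cached on the path argument: the file is opened at most once per process,
    # which avoids exhausting file descriptors under heavy concurrent load.
    with open(path, encoding="utf-8") as f:
        names = [line.strip() for line in f if line.strip()]
    return {name: index for index, name in enumerate(names)}
```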
api/core/model_runtime/model_providers/model_provider_factory.py

```diff
@@ -1,14 +1,10 @@
 import hashlib
 import logging
-import os
 from collections.abc import Sequence
 from threading import Lock
 from typing import Optional

 from pydantic import BaseModel

 import contexts
-from core.helper.position_helper import get_provider_position_map, sort_to_dict_by_position_map
 from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
 from core.model_runtime.entities.provider_entities import ProviderConfig, ProviderEntity, SimpleProviderEntity
 from core.model_runtime.model_providers.__base.ai_model import AIModel
@@ -28,48 +24,20 @@ from core.plugin.impl.model import PluginModelClient
 logger = logging.getLogger(__name__)


 class ModelProviderExtension(BaseModel):
     plugin_model_provider_entity: PluginModelProviderEntity
     position: Optional[int] = None


 class ModelProviderFactory:
-    provider_position_map: dict[str, int]
-
     def __init__(self, tenant_id: str):
-        self.provider_position_map = {}
-
         self.tenant_id = tenant_id
         self.plugin_model_manager = PluginModelClient()

-        if not self.provider_position_map:
-            # get the path of current classes
-            current_path = os.path.abspath(__file__)
-            model_providers_path = os.path.dirname(current_path)
-
-            # get _position.yaml file path
-            self.provider_position_map = get_provider_position_map(model_providers_path)
-
     def get_providers(self) -> Sequence[ProviderEntity]:
         """
         Get all providers
         :return: list of providers
         """
         # Fetch plugin model providers
+        # FIXME(-LAN-): Removed position map sorting since providers are fetched from plugin server
+        # The plugin server should return providers in the desired order
         plugin_providers = self.get_plugin_model_providers()
-
-        # Convert PluginModelProviderEntity to ModelProviderExtension
-        model_provider_extensions = []
-        for provider in plugin_providers:
-            model_provider_extensions.append(ModelProviderExtension(plugin_model_provider_entity=provider))
-
-        sorted_extensions = sort_to_dict_by_position_map(
-            position_map=self.provider_position_map,
-            data=model_provider_extensions,
-            name_func=lambda x: x.plugin_model_provider_entity.declaration.provider,
-        )
-
-        return [extension.plugin_model_provider_entity.declaration for extension in sorted_extensions.values()]
+        return [provider.declaration for provider in plugin_providers]

     def get_plugin_model_providers(self) -> Sequence[PluginModelProviderEntity]:
         """
```
```diff
@@ -18,7 +18,7 @@ from core.tools.entities.values import ToolLabelEnum, default_tool_label_dict
 from core.tools.errors import (
     ToolProviderNotFoundError,
 )
-from core.tools.utils.yaml_utils import load_yaml_file
+from core.tools.utils.yaml_utils import load_yaml_file_cached


 class BuiltinToolProviderController(ToolProviderController):
@@ -31,7 +31,7 @@ class BuiltinToolProviderController(ToolProviderController):
         provider = self.__class__.__module__.split(".")[-1]
         yaml_path = path.join(path.dirname(path.realpath(__file__)), "providers", provider, f"{provider}.yaml")
         try:
-            provider_yaml = load_yaml_file(yaml_path, ignore_error=False)
+            provider_yaml = load_yaml_file_cached(yaml_path)
         except Exception as e:
             raise ToolProviderNotFoundError(f"can not load provider yaml for {provider}: {e}")
@@ -71,7 +71,7 @@ class BuiltinToolProviderController(ToolProviderController):
         for tool_file in tool_files:
             # get tool name
             tool_name = tool_file.split(".")[0]
-            tool = load_yaml_file(path.join(tool_path, tool_file), ignore_error=False)
+            tool = load_yaml_file_cached(path.join(tool_path, tool_file))

             # get tool class, import the module
             assistant_tool_class: type = load_single_subclass_from_source(
```
api/core/tools/utils/yaml_utils.py

```diff
@@ -1,4 +1,5 @@
 import logging
+from functools import lru_cache
 from pathlib import Path
 from typing import Any

@@ -8,28 +9,25 @@ from yaml import YAMLError
 logger = logging.getLogger(__name__)


-def load_yaml_file(file_path: str, ignore_error: bool = True, default_value: Any = {}):
-    """
-    Safe loading a YAML file
-    :param file_path: the path of the YAML file
-    :param ignore_error:
-        if True, return default_value if error occurs and the error will be logged in debug level
-        if False, raise error if error occurs
-    :param default_value: the value returned when errors ignored
-    :return: an object of the YAML content
-    """
+def _load_yaml_file(*, file_path: str):
     if not file_path or not Path(file_path).exists():
-        if ignore_error:
-            return default_value
-        else:
-            raise FileNotFoundError(f"File not found: {file_path}")
+        raise FileNotFoundError(f"File not found: {file_path}")

     with open(file_path, encoding="utf-8") as yaml_file:
         try:
             yaml_content = yaml.safe_load(yaml_file)
-            return yaml_content or default_value
+            return yaml_content
         except Exception as e:
-            if ignore_error:
-                return default_value
-            else:
-                raise YAMLError(f"Failed to load YAML file {file_path}: {e}") from e
+            raise YAMLError(f"Failed to load YAML file {file_path}: {e}") from e


+@lru_cache(maxsize=128)
+def load_yaml_file_cached(file_path: str) -> Any:
+    """
+    Cached version of load_yaml_file for static configuration files.
+    Only use for files that don't change during runtime (e.g., position files)
+
+    :param file_path: the path of the YAML file
+    :return: an object of the YAML content
+    """
+    return _load_yaml_file(file_path=file_path)
```
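One general `functools.lru_cache` caveat applies to the new loader (not stated in the diff, but worth noting): repeated calls return the same parsed object, so callers must treat the result as read-only. A minimal illustration with a hypothetical path:

```python
from core.tools.utils.yaml_utils import load_yaml_file_cached

# Hypothetical file path, for illustration only.
a = load_yaml_file_cached(file_path="providers/example/example.yaml")
b = load_yaml_file_cached(file_path="providers/example/example.yaml")
assert a is b  # one file read per process; mutating `a` would affect all later callers
```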
api/pyproject.toml

```diff
@@ -168,6 +168,8 @@ dev = [
     "types-redis>=4.6.0.20241004",
     "celery-types>=0.23.0",
     "mypy~=1.17.1",
+    "locust>=2.40.4",
+    "sseclient-py>=1.8.0",
 ]

 ############################################################
```
```diff
@@ -3,7 +3,7 @@ from textwrap import dedent
 import pytest
 from yaml import YAMLError

-from core.tools.utils.yaml_utils import load_yaml_file
+from core.tools.utils.yaml_utils import _load_yaml_file

 EXAMPLE_YAML_FILE = "example_yaml.yaml"
 INVALID_YAML_FILE = "invalid_yaml.yaml"
@@ -56,15 +56,15 @@ def prepare_invalid_yaml_file(tmp_path, monkeypatch) -> str:


 def test_load_yaml_non_existing_file():
-    assert load_yaml_file(file_path=NON_EXISTING_YAML_FILE) == {}
-    assert load_yaml_file(file_path="") == {}
+    with pytest.raises(FileNotFoundError):
+        _load_yaml_file(file_path=NON_EXISTING_YAML_FILE)

     with pytest.raises(FileNotFoundError):
-        load_yaml_file(file_path=NON_EXISTING_YAML_FILE, ignore_error=False)
+        _load_yaml_file(file_path="")


 def test_load_valid_yaml_file(prepare_example_yaml_file):
-    yaml_data = load_yaml_file(file_path=prepare_example_yaml_file)
+    yaml_data = _load_yaml_file(file_path=prepare_example_yaml_file)
     assert len(yaml_data) > 0
     assert yaml_data["age"] == 30
     assert yaml_data["gender"] == "male"
@@ -77,7 +77,4 @@ def test_load_valid_yaml_file(prepare_example_yaml_file):
 def test_load_invalid_yaml_file(prepare_invalid_yaml_file):
     # yaml syntax error
     with pytest.raises(YAMLError):
-        load_yaml_file(file_path=prepare_invalid_yaml_file, ignore_error=False)
-
-    # ignore error
-    assert load_yaml_file(file_path=prepare_invalid_yaml_file) == {}
+        _load_yaml_file(file_path=prepare_invalid_yaml_file)
```
api/uv.lock (generated): 238 lines changed
```diff
@@ -538,6 +538,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/57/f4/a69c20ee4f660081a7dedb1ac57f29be9378e04edfcb90c526b923d4bebc/beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a", size = 142979, upload-time = "2023-04-07T15:02:50.77Z" },
 ]

+[[package]]
+name = "bidict"
+version = "0.23.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/9a/6e/026678aa5a830e07cd9498a05d3e7e650a4f56a42f267a53d22bcda1bdc9/bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71", size = 29093, upload-time = "2024-02-18T19:09:05.748Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764, upload-time = "2024-02-18T19:09:04.156Z" },
+]
+
 [[package]]
 name = "billiard"
 version = "4.2.1"
```
```diff
@@ -1061,6 +1070,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/a7/06/3d6badcf13db419e25b07041d9c7b4a2c331d3f4e7134445ec5df57714cd/coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934", size = 46018, upload-time = "2021-06-11T10:22:42.561Z" },
 ]

+[[package]]
+name = "configargparse"
+version = "1.7.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/85/4d/6c9ef746dfcc2a32e26f3860bb4a011c008c392b83eabdfb598d1a8bbe5d/configargparse-1.7.1.tar.gz", hash = "sha256:79c2ddae836a1e5914b71d58e4b9adbd9f7779d4e6351a637b7d2d9b6c46d3d9", size = 43958, upload-time = "2025-05-23T14:26:17.369Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/31/28/d28211d29bcc3620b1fece85a65ce5bb22f18670a03cd28ea4b75ede270c/configargparse-1.7.1-py3-none-any.whl", hash = "sha256:8b586a31f9d873abd1ca527ffbe58863c99f36d896e2829779803125e83be4b6", size = 25607, upload-time = "2025-05-23T14:26:15.923Z" },
+]
+
 [[package]]
 name = "cos-python-sdk-v5"
 version = "1.9.30"
```
```diff
@@ -1357,6 +1375,7 @@ dev = [
     { name = "dotenv-linter" },
     { name = "faker" },
     { name = "hypothesis" },
+    { name = "locust" },
     { name = "lxml-stubs" },
     { name = "mypy" },
     { name = "pandas-stubs" },
```
```diff
@@ -1367,6 +1386,7 @@ dev = [
     { name = "pytest-mock" },
     { name = "ruff" },
     { name = "scipy-stubs" },
+    { name = "sseclient-py" },
     { name = "testcontainers" },
     { name = "ty" },
     { name = "types-aiofiles" },
```
```diff
@@ -1549,6 +1569,7 @@ dev = [
     { name = "dotenv-linter", specifier = "~=0.5.0" },
     { name = "faker", specifier = "~=32.1.0" },
     { name = "hypothesis", specifier = ">=6.131.15" },
+    { name = "locust", specifier = ">=2.40.4" },
     { name = "lxml-stubs", specifier = "~=0.5.1" },
     { name = "mypy", specifier = "~=1.17.1" },
     { name = "pandas-stubs", specifier = "~=2.2.3" },
```
```diff
@@ -1559,6 +1580,7 @@ dev = [
     { name = "pytest-mock", specifier = "~=3.14.0" },
     { name = "ruff", specifier = "~=0.12.3" },
     { name = "scipy-stubs", specifier = ">=1.15.3.0" },
+    { name = "sseclient-py", specifier = ">=1.8.0" },
     { name = "testcontainers", specifier = "~=4.10.0" },
     { name = "ty", specifier = "~=0.0.1a19" },
     { name = "types-aiofiles", specifier = "~=24.1.0" },
```
```diff
@@ -2036,6 +2058,58 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/11/b2/5d20664ef6a077bec9f27f7a7ee761edc64946d0b1e293726a3d074a9a18/gevent-24.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:68bee86b6e1c041a187347ef84cf03a792f0b6c7238378bf6ba4118af11feaae", size = 1541631, upload-time = "2024-11-11T14:55:34.977Z" },
 ]

+[[package]]
+name = "geventhttpclient"
+version = "2.3.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "brotli" },
+    { name = "certifi" },
+    { name = "gevent" },
+    { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/89/19/1ca8de73dcc0596d3df01be299e940d7fc3bccbeb6f62bb8dd2d427a3a50/geventhttpclient-2.3.4.tar.gz", hash = "sha256:1749f75810435a001fc6d4d7526c92cf02b39b30ab6217a886102f941c874222", size = 83545, upload-time = "2025-06-11T13:18:14.144Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/3d/c7/c4c31bd92b08c4e34073c722152b05c48c026bc6978cf04f52be7e9050d5/geventhttpclient-2.3.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fb8f6a18f1b5e37724111abbd3edf25f8f00e43dc261b11b10686e17688d2405", size = 71919, upload-time = "2025-06-11T13:16:49.796Z" },
+    { url = "https://files.pythonhosted.org/packages/9d/8a/4565e6e768181ecb06677861d949b3679ed29123b6f14333e38767a17b5a/geventhttpclient-2.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dbb28455bb5d82ca3024f9eb7d65c8ff6707394b584519def497b5eb9e5b1222", size = 52577, upload-time = "2025-06-11T13:16:50.657Z" },
+    { url = "https://files.pythonhosted.org/packages/02/a1/fb623cf478799c08f95774bc41edb8ae4c2f1317ae986b52f233d0f3fa05/geventhttpclient-2.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96578fc4a5707b5535d1c25a89e72583e02aafe64d14f3b4d78f9c512c6d613c", size = 51981, upload-time = "2025-06-11T13:16:52.586Z" },
+    { url = "https://files.pythonhosted.org/packages/18/b2/a4ddd3d24c8aa064b19b9f180eb5e1517248518289d38af70500569ebedf/geventhttpclient-2.3.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:19721357db976149ccf54ac279eab8139da8cdf7a11343fd02212891b6f39677", size = 114287, upload-time = "2025-08-24T12:16:47.101Z" },
+    { url = "https://files.pythonhosted.org/packages/a1/cc/caac4d4bd2c72d53836dbf50018aed3747c0d0c6f1d08175a785083d9d36/geventhttpclient-2.3.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecf830cdcd1d4d28463c8e0c48f7f5fb06f3c952fff875da279385554d1d4d65", size = 115208, upload-time = "2025-08-24T12:16:48.108Z" },
+    { url = "https://files.pythonhosted.org/packages/04/a2/8278bd4d16b9df88bd538824595b7b84efd6f03c7b56b2087d09be838e02/geventhttpclient-2.3.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:47dbf8a163a07f83b38b0f8a35b85e5d193d3af4522ab8a5bbecffff1a4cd462", size = 121101, upload-time = "2025-08-24T12:16:49.417Z" },
+    { url = "https://files.pythonhosted.org/packages/e3/0e/a9ebb216140bd0854007ff953094b2af983cdf6d4aec49796572fcbf2606/geventhttpclient-2.3.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e39ad577b33a5be33b47bff7c2dda9b19ced4773d169d6555777cd8445c13c0", size = 118494, upload-time = "2025-06-11T13:16:54.172Z" },
+    { url = "https://files.pythonhosted.org/packages/4f/95/6d45dead27e4f5db7a6d277354b0e2877c58efb3cd1687d90a02d5c7b9cd/geventhttpclient-2.3.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:110d863baf7f0a369b6c22be547c5582e87eea70ddda41894715c870b2e82eb0", size = 123860, upload-time = "2025-06-11T13:16:55.824Z" },
+    { url = "https://files.pythonhosted.org/packages/70/a1/4baa8dca3d2df94e6ccca889947bb5929aca5b64b59136bbf1779b5777ba/geventhttpclient-2.3.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d9fca98469bd770e3efd88326854296d1aa68016f285bd1a2fb6cd21e17ee", size = 114969, upload-time = "2025-06-11T13:16:58.02Z" },
+    { url = "https://files.pythonhosted.org/packages/ab/48/123fa67f6fca14c557332a168011565abd9cbdccc5c8b7ed76d9a736aeb2/geventhttpclient-2.3.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71dbc6d4004017ef88c70229809df4ad2317aad4876870c0b6bcd4d6695b7a8d", size = 113311, upload-time = "2025-06-11T13:16:59.423Z" },
+    { url = "https://files.pythonhosted.org/packages/93/e4/8a467991127ca6c53dd79a8aecb26a48207e7e7976c578fb6eb31378792c/geventhttpclient-2.3.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ed35391ad697d6cda43c94087f59310f028c3e9fb229e435281a92509469c627", size = 111154, upload-time = "2025-06-11T13:17:01.139Z" },
+    { url = "https://files.pythonhosted.org/packages/11/e7/cca0663d90bc8e68592a62d7b28148eb9fd976f739bb107e4c93f9ae6d81/geventhttpclient-2.3.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:97cd2ab03d303fd57dea4f6d9c2ab23b7193846f1b3bbb4c80b315ebb5fc8527", size = 112532, upload-time = "2025-06-11T13:17:03.729Z" },
+    { url = "https://files.pythonhosted.org/packages/02/98/625cee18a3be5f7ca74c612d4032b0c013b911eb73c7e72e06fa56a44ba2/geventhttpclient-2.3.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ec4d1aa08569b7eb075942caeacabefee469a0e283c96c7aac0226d5e7598fe8", size = 117806, upload-time = "2025-06-11T13:17:05.138Z" },
+    { url = "https://files.pythonhosted.org/packages/f1/5e/e561a5f8c9d98b7258685355aacb9cca8a3c714190cf92438a6e91da09d5/geventhttpclient-2.3.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:93926aacdb0f4289b558f213bc32c03578f3432a18b09e4b6d73a716839d7a74", size = 111392, upload-time = "2025-06-11T13:17:06.053Z" },
+    { url = "https://files.pythonhosted.org/packages/d0/37/42d09ad90fd1da960ff68facaa3b79418ccf66297f202ba5361038fc3182/geventhttpclient-2.3.4-cp311-cp311-win32.whl", hash = "sha256:ea87c25e933991366049a42c88e91ad20c2b72e11c7bd38ef68f80486ab63cb2", size = 48332, upload-time = "2025-06-11T13:17:06.965Z" },
+    { url = "https://files.pythonhosted.org/packages/4b/0b/55e2a9ed4b1aed7c97e857dc9649a7e804609a105e1ef3cb01da857fbce7/geventhttpclient-2.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:e02e0e9ef2e45475cf33816c8fb2e24595650bcf259e7b15b515a7b49cae1ccf", size = 48969, upload-time = "2025-06-11T13:17:08.239Z" },
+    { url = "https://files.pythonhosted.org/packages/4f/72/dcbc6dbf838549b7b0c2c18c1365d2580eb7456939e4b608c3ab213fce78/geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9ac30c38d86d888b42bb2ab2738ab9881199609e9fa9a153eb0c66fc9188c6cb", size = 71984, upload-time = "2025-06-11T13:17:09.126Z" },
+    { url = "https://files.pythonhosted.org/packages/4c/f9/74aa8c556364ad39b238919c954a0da01a6154ad5e85a1d1ab5f9f5ac186/geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b802000a4fad80fa57e895009671d6e8af56777e3adf0d8aee0807e96188fd9", size = 52631, upload-time = "2025-06-11T13:17:10.061Z" },
+    { url = "https://files.pythonhosted.org/packages/11/1a/bc4b70cba8b46be8b2c6ca5b8067c4f086f8c90915eb68086ab40ff6243d/geventhttpclient-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:461e4d9f4caee481788ec95ac64e0a4a087c1964ddbfae9b6f2dc51715ba706c", size = 51991, upload-time = "2025-06-11T13:17:11.049Z" },
+    { url = "https://files.pythonhosted.org/packages/03/3f/5ce6e003b3b24f7caf3207285831afd1a4f857ce98ac45e1fb7a6815bd58/geventhttpclient-2.3.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b7e41687c74e8fbe6a665458bbaea0c5a75342a95e2583738364a73bcbf1671b", size = 114982, upload-time = "2025-08-24T12:16:50.76Z" },
+    { url = "https://files.pythonhosted.org/packages/60/16/6f9dad141b7c6dd7ee831fbcd72dd02535c57bc1ec3c3282f07e72c31344/geventhttpclient-2.3.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ea5da20f4023cf40207ce15f5f4028377ffffdba3adfb60b4c8f34925fce79", size = 115654, upload-time = "2025-08-24T12:16:52.072Z" },
+    { url = "https://files.pythonhosted.org/packages/ba/52/9b516a2ff423d8bd64c319e1950a165ceebb552781c5a88c1e94e93e8713/geventhttpclient-2.3.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:91f19a8a6899c27867dbdace9500f337d3e891a610708e86078915f1d779bf53", size = 121672, upload-time = "2025-08-24T12:16:53.361Z" },
+    { url = "https://files.pythonhosted.org/packages/b0/f5/8d0f1e998f6d933c251b51ef92d11f7eb5211e3cd579018973a2b455f7c5/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41f2dcc0805551ea9d49f9392c3b9296505a89b9387417b148655d0d8251b36e", size = 119012, upload-time = "2025-06-11T13:17:11.956Z" },
+    { url = "https://files.pythonhosted.org/packages/ea/0e/59e4ab506b3c19fc72e88ca344d150a9028a00c400b1099637100bec26fc/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:62f3a29bf242ecca6360d497304900683fd8f42cbf1de8d0546c871819251dad", size = 124565, upload-time = "2025-06-11T13:17:12.896Z" },
+    { url = "https://files.pythonhosted.org/packages/39/5d/dcbd34dfcda0c016b4970bd583cb260cc5ebfc35b33d0ec9ccdb2293587a/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8714a3f2c093aeda3ffdb14c03571d349cb3ed1b8b461d9f321890659f4a5dbf", size = 115573, upload-time = "2025-06-11T13:17:13.937Z" },
+    { url = "https://files.pythonhosted.org/packages/03/51/89af99e4805e9ce7f95562dfbd23c0b0391830831e43d58f940ec74489ac/geventhttpclient-2.3.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11f38b74bab75282db66226197024a731250dcbe25542fd4e85ac5313547332", size = 114260, upload-time = "2025-06-11T13:17:14.913Z" },
+    { url = "https://files.pythonhosted.org/packages/b3/ec/3a3000bda432953abcc6f51d008166fa7abc1eeddd1f0246933d83854f73/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fccc2023a89dfbce2e1b1409b967011e45d41808df81b7fa0259397db79ba647", size = 111592, upload-time = "2025-06-11T13:17:15.879Z" },
+    { url = "https://files.pythonhosted.org/packages/d8/a3/88fd71fe6bbe1315a2d161cbe2cc7810c357d99bced113bea1668ede8bcf/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9d54b8e9a44890159ae36ba4ae44efd8bb79ff519055137a340d357538a68aa3", size = 113216, upload-time = "2025-06-11T13:17:16.883Z" },
+    { url = "https://files.pythonhosted.org/packages/52/eb/20435585a6911b26e65f901a827ef13551c053133926f8c28a7cca0fb08e/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:407cb68a3c3a2c4f5d503930298f2b26ae68137d520e8846d8e230a9981d9334", size = 118450, upload-time = "2025-06-11T13:17:17.968Z" },
+    { url = "https://files.pythonhosted.org/packages/2f/79/82782283d613570373990b676a0966c1062a38ca8f41a0f20843c5808e01/geventhttpclient-2.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54fbbcca2dcf06f12a337dd8f98417a09a49aa9d9706aa530fc93acb59b7d83c", size = 112226, upload-time = "2025-06-11T13:17:18.942Z" },
+    { url = "https://files.pythonhosted.org/packages/9c/c4/417d12fc2a31ad93172b03309c7f8c3a8bbd0cf25b95eb7835de26b24453/geventhttpclient-2.3.4-cp312-cp312-win32.whl", hash = "sha256:83143b41bde2eb010c7056f142cb764cfbf77f16bf78bda2323a160767455cf5", size = 48365, upload-time = "2025-06-11T13:17:20.096Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/f4/7e5ee2f460bbbd09cb5d90ff63a1cf80d60f1c60c29dac20326324242377/geventhttpclient-2.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:46eda9a9137b0ca7886369b40995d2a43a5dff033d0a839a54241015d1845d41", size = 48961, upload-time = "2025-06-11T13:17:21.111Z" },
+    { url = "https://files.pythonhosted.org/packages/0b/a7/de506f91a1ec67d3c4a53f2aa7475e7ffb869a17b71b94ba370a027a69ac/geventhttpclient-2.3.4-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:707a66cd1e3bf06e2c4f8f21d3b4e6290c9e092456f489c560345a8663cdd93e", size = 50828, upload-time = "2025-06-11T13:17:57.589Z" },
+    { url = "https://files.pythonhosted.org/packages/2b/43/86479c278e96cd3e190932b0003d5b8e415660d9e519d59094728ae249da/geventhttpclient-2.3.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:0129ce7ef50e67d66ea5de44d89a3998ab778a4db98093d943d6855323646fa5", size = 50086, upload-time = "2025-06-11T13:17:58.567Z" },
+    { url = "https://files.pythonhosted.org/packages/e8/f7/d3e04f95de14db3ca4fe126eb0e3ec24356125c5ca1f471a9b28b1d7714d/geventhttpclient-2.3.4-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fac2635f68b3b6752c2a576833d9d18f0af50bdd4bd7dd2d2ca753e3b8add84c", size = 54523, upload-time = "2025-06-11T13:17:59.536Z" },
+    { url = "https://files.pythonhosted.org/packages/45/a7/d80c9ec1663f70f4bd976978bf86b3d0d123a220c4ae636c66d02d3accdb/geventhttpclient-2.3.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71206ab89abdd0bd5fee21e04a3995ec1f7d8ae1478ee5868f9e16e85a831653", size = 58866, upload-time = "2025-06-11T13:18:03.719Z" },
+    { url = "https://files.pythonhosted.org/packages/55/92/d874ff7e52803cef3850bf8875816a9f32e0a154b079a74e6663534bef30/geventhttpclient-2.3.4-pp311-pypy311_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8bde667d0ce46065fe57f8ff24b2e94f620a5747378c97314dcfc8fbab35b73", size = 54766, upload-time = "2025-06-11T13:18:04.724Z" },
+    { url = "https://files.pythonhosted.org/packages/a8/73/2e03125170485193fcc99ef23b52749543d6c6711706d58713fe315869c4/geventhttpclient-2.3.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:5f71c75fc138331cbbe668a08951d36b641d2c26fb3677d7e497afb8419538db", size = 49011, upload-time = "2025-06-11T13:18:05.702Z" },
+]
+
 [[package]]
 name = "gitdb"
 version = "4.0.12"
```
```diff
@@ -2959,6 +3033,51 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/e2/3b/a9a17366af80127bd09decbe2a54d8974b6d8b274b39bf47fbaedeec6307/llvmlite-0.44.0-cp312-cp312-win_amd64.whl", hash = "sha256:eae7e2d4ca8f88f89d315b48c6b741dcb925d6a1042da694aa16ab3dd4cbd3a1", size = 30332380, upload-time = "2025-01-20T11:14:02.442Z" },
 ]

+[[package]]
+name = "locust"
+version = "2.40.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "configargparse" },
+    { name = "flask" },
+    { name = "flask-cors" },
+    { name = "flask-login" },
+    { name = "gevent" },
+    { name = "geventhttpclient" },
+    { name = "locust-cloud" },
+    { name = "msgpack" },
+    { name = "psutil" },
+    { name = "pytest" },
+    { name = "python-engineio" },
+    { name = "python-socketio", extra = ["client"] },
+    { name = "pywin32", marker = "sys_platform == 'win32'" },
+    { name = "pyzmq" },
+    { name = "requests" },
+    { name = "setuptools" },
+    { name = "typing-extensions", marker = "python_full_version < '3.12'" },
+    { name = "werkzeug" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c8/40/31ff56ab6f46c7c77e61bbbd23f87fdf6a4aaf674dc961a3c573320caedc/locust-2.40.4.tar.gz", hash = "sha256:3a3a470459edc4ba1349229bf1aca4c0cb651c4e2e3f85d3bc28fe8118f5a18f", size = 1412529, upload-time = "2025-09-11T09:26:13.713Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/79/7e/db1d969caf45ce711e81cd4f3e7c4554c3925a02383a1dcadb442eae3802/locust-2.40.4-py3-none-any.whl", hash = "sha256:50e647a73c5a4e7a775c6e4311979472fce8b00ed783837a2ce9bb36786f7d1a", size = 1430961, upload-time = "2025-09-11T09:26:11.623Z" },
+]
+
+[[package]]
+name = "locust-cloud"
+version = "1.26.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "configargparse" },
+    { name = "gevent" },
+    { name = "platformdirs" },
+    { name = "python-engineio" },
+    { name = "python-socketio", extra = ["client"] },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/84/ad/10b299b134068a4250a9156e6832a717406abe1dfea2482a07ae7bdca8f3/locust_cloud-1.26.3.tar.gz", hash = "sha256:587acfd4d2dee715fb5f0c3c2d922770babf0b7cff7b2927afbb693a9cd193cc", size = 456042, upload-time = "2025-07-15T19:51:53.791Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/50/6a/276fc50a9d170e7cbb6715735480cb037abb526639bca85491576e6eee4a/locust_cloud-1.26.3-py3-none-any.whl", hash = "sha256:8cb4b8bb9adcd5b99327bc8ed1d98cf67a29d9d29512651e6e94869de6f1faa8", size = 410023, upload-time = "2025-07-15T19:51:52.056Z" },
+]
+
 [[package]]
 name = "lxml"
 version = "6.0.0"
```
```diff
@@ -3230,6 +3349,34 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/5e/75/bd9b7bb966668920f06b200e84454c8f3566b102183bc55c5473d96cb2b9/msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca", size = 20583, upload-time = "2025-03-14T23:51:03.016Z" },
 ]

+[[package]]
+name = "msgpack"
+version = "1.1.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/45/b1/ea4f68038a18c77c9467400d166d74c4ffa536f34761f7983a104357e614/msgpack-1.1.1.tar.gz", hash = "sha256:77b79ce34a2bdab2594f490c8e80dd62a02d650b91a75159a63ec413b8d104cd", size = 173555, upload-time = "2025-06-13T06:52:51.324Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/7f/83/97f24bf9848af23fe2ba04380388216defc49a8af6da0c28cc636d722502/msgpack-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:71ef05c1726884e44f8b1d1773604ab5d4d17729d8491403a705e649116c9558", size = 82728, upload-time = "2025-06-13T06:51:50.68Z" },
+    { url = "https://files.pythonhosted.org/packages/aa/7f/2eaa388267a78401f6e182662b08a588ef4f3de6f0eab1ec09736a7aaa2b/msgpack-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:36043272c6aede309d29d56851f8841ba907a1a3d04435e43e8a19928e243c1d", size = 79279, upload-time = "2025-06-13T06:51:51.72Z" },
+    { url = "https://files.pythonhosted.org/packages/f8/46/31eb60f4452c96161e4dfd26dbca562b4ec68c72e4ad07d9566d7ea35e8a/msgpack-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a32747b1b39c3ac27d0670122b57e6e57f28eefb725e0b625618d1b59bf9d1e0", size = 423859, upload-time = "2025-06-13T06:51:52.749Z" },
+    { url = "https://files.pythonhosted.org/packages/45/16/a20fa8c32825cc7ae8457fab45670c7a8996d7746ce80ce41cc51e3b2bd7/msgpack-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a8b10fdb84a43e50d38057b06901ec9da52baac6983d3f709d8507f3889d43f", size = 429975, upload-time = "2025-06-13T06:51:53.97Z" },
+    { url = "https://files.pythonhosted.org/packages/86/ea/6c958e07692367feeb1a1594d35e22b62f7f476f3c568b002a5ea09d443d/msgpack-1.1.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0c325c3f485dc54ec298d8b024e134acf07c10d494ffa24373bea729acf704", size = 413528, upload-time = "2025-06-13T06:51:55.507Z" },
+    { url = "https://files.pythonhosted.org/packages/75/05/ac84063c5dae79722bda9f68b878dc31fc3059adb8633c79f1e82c2cd946/msgpack-1.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:88daaf7d146e48ec71212ce21109b66e06a98e5e44dca47d853cbfe171d6c8d2", size = 413338, upload-time = "2025-06-13T06:51:57.023Z" },
+    { url = "https://files.pythonhosted.org/packages/69/e8/fe86b082c781d3e1c09ca0f4dacd457ede60a13119b6ce939efe2ea77b76/msgpack-1.1.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8b55ea20dc59b181d3f47103f113e6f28a5e1c89fd5b67b9140edb442ab67f2", size = 422658, upload-time = "2025-06-13T06:51:58.419Z" },
+    { url = "https://files.pythonhosted.org/packages/3b/2b/bafc9924df52d8f3bb7c00d24e57be477f4d0f967c0a31ef5e2225e035c7/msgpack-1.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4a28e8072ae9779f20427af07f53bbb8b4aa81151054e882aee333b158da8752", size = 427124, upload-time = "2025-06-13T06:51:59.969Z" },
+    { url = "https://files.pythonhosted.org/packages/a2/3b/1f717e17e53e0ed0b68fa59e9188f3f610c79d7151f0e52ff3cd8eb6b2dc/msgpack-1.1.1-cp311-cp311-win32.whl", hash = "sha256:7da8831f9a0fdb526621ba09a281fadc58ea12701bc709e7b8cbc362feabc295", size = 65016, upload-time = "2025-06-13T06:52:01.294Z" },
+    { url = "https://files.pythonhosted.org/packages/48/45/9d1780768d3b249accecc5a38c725eb1e203d44a191f7b7ff1941f7df60c/msgpack-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fd1b58e1431008a57247d6e7cc4faa41c3607e8e7d4aaf81f7c29ea013cb458", size = 72267, upload-time = "2025-06-13T06:52:02.568Z" },
+    { url = "https://files.pythonhosted.org/packages/e3/26/389b9c593eda2b8551b2e7126ad3a06af6f9b44274eb3a4f054d48ff7e47/msgpack-1.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ae497b11f4c21558d95de9f64fff7053544f4d1a17731c866143ed6bb4591238", size = 82359, upload-time = "2025-06-13T06:52:03.909Z" },
+    { url = "https://files.pythonhosted.org/packages/ab/65/7d1de38c8a22cf8b1551469159d4b6cf49be2126adc2482de50976084d78/msgpack-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:33be9ab121df9b6b461ff91baac6f2731f83d9b27ed948c5b9d1978ae28bf157", size = 79172, upload-time = "2025-06-13T06:52:05.246Z" },
+    { url = "https://files.pythonhosted.org/packages/0f/bd/cacf208b64d9577a62c74b677e1ada005caa9b69a05a599889d6fc2ab20a/msgpack-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f64ae8fe7ffba251fecb8408540c34ee9df1c26674c50c4544d72dbf792e5ce", size = 425013, upload-time = "2025-06-13T06:52:06.341Z" },
+    { url = "https://files.pythonhosted.org/packages/4d/ec/fd869e2567cc9c01278a736cfd1697941ba0d4b81a43e0aa2e8d71dab208/msgpack-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a494554874691720ba5891c9b0b39474ba43ffb1aaf32a5dac874effb1619e1a", size = 426905, upload-time = "2025-06-13T06:52:07.501Z" },
+    { url = "https://files.pythonhosted.org/packages/55/2a/35860f33229075bce803a5593d046d8b489d7ba2fc85701e714fc1aaf898/msgpack-1.1.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb643284ab0ed26f6957d969fe0dd8bb17beb567beb8998140b5e38a90974f6c", size = 407336, upload-time = "2025-06-13T06:52:09.047Z" },
+    { url = "https://files.pythonhosted.org/packages/8c/16/69ed8f3ada150bf92745fb4921bd621fd2cdf5a42e25eb50bcc57a5328f0/msgpack-1.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d275a9e3c81b1093c060c3837e580c37f47c51eca031f7b5fb76f7b8470f5f9b", size = 409485, upload-time = "2025-06-13T06:52:10.382Z" },
+    { url = "https://files.pythonhosted.org/packages/c6/b6/0c398039e4c6d0b2e37c61d7e0e9d13439f91f780686deb8ee64ecf1ae71/msgpack-1.1.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fd6b577e4541676e0cc9ddc1709d25014d3ad9a66caa19962c4f5de30fc09ef", size = 412182, upload-time = "2025-06-13T06:52:11.644Z" },
+    { url = "https://files.pythonhosted.org/packages/b8/d0/0cf4a6ecb9bc960d624c93effaeaae75cbf00b3bc4a54f35c8507273cda1/msgpack-1.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bb29aaa613c0a1c40d1af111abf025f1732cab333f96f285d6a93b934738a68a", size = 419883, upload-time = "2025-06-13T06:52:12.806Z" },
+    { url = "https://files.pythonhosted.org/packages/62/83/9697c211720fa71a2dfb632cad6196a8af3abea56eece220fde4674dc44b/msgpack-1.1.1-cp312-cp312-win32.whl", hash = "sha256:870b9a626280c86cff9c576ec0d9cbcc54a1e5ebda9cd26dab12baf41fee218c", size = 65406, upload-time = "2025-06-13T06:52:14.271Z" },
+    { url = "https://files.pythonhosted.org/packages/c0/23/0abb886e80eab08f5e8c485d6f13924028602829f63b8f5fa25a06636628/msgpack-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:5692095123007180dca3e788bb4c399cc26626da51629a31d40207cb262e67f4", size = 72558, upload-time = "2025-06-13T06:52:15.252Z" },
+]
+
 [[package]]
 name = "msrest"
 version = "0.7.1"
```
```diff
@@ -4826,6 +4973,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863, upload-time = "2024-01-23T06:32:58.246Z" },
 ]

+[[package]]
+name = "python-engineio"
+version = "4.12.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "simple-websocket" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ba/0b/67295279b66835f9fa7a491650efcd78b20321c127036eef62c11a31e028/python_engineio-4.12.2.tar.gz", hash = "sha256:e7e712ffe1be1f6a05ee5f951e72d434854a32fcfc7f6e4d9d3cae24ec70defa", size = 91677, upload-time = "2025-06-04T19:22:18.789Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/0c/fa/df59acedf7bbb937f69174d00f921a7b93aa5a5f5c17d05296c814fff6fc/python_engineio-4.12.2-py3-none-any.whl", hash = "sha256:8218ab66950e179dfec4b4bbb30aecf3f5d86f5e58e6fc1aa7fde2c698b2804f", size = 59536, upload-time = "2025-06-04T19:22:16.916Z" },
+]
+
 [[package]]
 name = "python-http-client"
 version = "3.3.7"
```
```diff
@@ -4882,6 +5041,25 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/d9/4f/00be2196329ebbff56ce564aa94efb0fbc828d00de250b1980de1a34ab49/python_pptx-1.0.2-py3-none-any.whl", hash = "sha256:160838e0b8565a8b1f67947675886e9fea18aa5e795db7ae531606d68e785cba", size = 472788, upload-time = "2024-08-07T17:33:28.192Z" },
 ]

+[[package]]
+name = "python-socketio"
+version = "5.13.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "bidict" },
+    { name = "python-engineio" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/21/1a/396d50ccf06ee539fa758ce5623b59a9cb27637fc4b2dc07ed08bf495e77/python_socketio-5.13.0.tar.gz", hash = "sha256:ac4e19a0302ae812e23b712ec8b6427ca0521f7c582d6abb096e36e24a263029", size = 121125, upload-time = "2025-04-12T15:46:59.933Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/3c/32/b4fb8585d1be0f68bde7e110dffbcf354915f77ad8c778563f0ad9655c02/python_socketio-5.13.0-py3-none-any.whl", hash = "sha256:51f68d6499f2df8524668c24bcec13ba1414117cfb3a90115c559b601ab10caf", size = 77800, upload-time = "2025-04-12T15:46:58.412Z" },
+]
+
+[package.optional-dependencies]
+client = [
+    { name = "requests" },
+    { name = "websocket-client" },
+]
+
 [[package]]
 name = "pytz"
 version = "2025.2"
```
```diff
@@ -4939,6 +5117,42 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" },
 ]

+[[package]]
+name = "pyzmq"
+version = "27.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "cffi", marker = "implementation_name == 'pypy'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/06/5d/305323ba86b284e6fcb0d842d6adaa2999035f70f8c38a9b6d21ad28c3d4/pyzmq-27.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:226b091818d461a3bef763805e75685e478ac17e9008f49fce2d3e52b3d58b86", size = 1333328, upload-time = "2025-09-08T23:07:45.946Z" },
+    { url = "https://files.pythonhosted.org/packages/bd/a0/fc7e78a23748ad5443ac3275943457e8452da67fda347e05260261108cbc/pyzmq-27.1.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:0790a0161c281ca9723f804871b4027f2e8b5a528d357c8952d08cd1a9c15581", size = 908803, upload-time = "2025-09-08T23:07:47.551Z" },
+    { url = "https://files.pythonhosted.org/packages/7e/22/37d15eb05f3bdfa4abea6f6d96eb3bb58585fbd3e4e0ded4e743bc650c97/pyzmq-27.1.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c895a6f35476b0c3a54e3eb6ccf41bf3018de937016e6e18748317f25d4e925f", size = 668836, upload-time = "2025-09-08T23:07:49.436Z" },
+    { url = "https://files.pythonhosted.org/packages/b1/c4/2a6fe5111a01005fc7af3878259ce17684fabb8852815eda6225620f3c59/pyzmq-27.1.0-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bbf8d3630bf96550b3be8e1fc0fea5cbdc8d5466c1192887bd94869da17a63e", size = 857038, upload-time = "2025-09-08T23:07:51.234Z" },
+    { url = "https://files.pythonhosted.org/packages/cb/eb/bfdcb41d0db9cd233d6fb22dc131583774135505ada800ebf14dfb0a7c40/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15c8bd0fe0dabf808e2d7a681398c4e5ded70a551ab47482067a572c054c8e2e", size = 1657531, upload-time = "2025-09-08T23:07:52.795Z" },
+    { url = "https://files.pythonhosted.org/packages/ab/21/e3180ca269ed4a0de5c34417dfe71a8ae80421198be83ee619a8a485b0c7/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bafcb3dd171b4ae9f19ee6380dfc71ce0390fefaf26b504c0e5f628d7c8c54f2", size = 2034786, upload-time = "2025-09-08T23:07:55.047Z" },
+    { url = "https://files.pythonhosted.org/packages/3b/b1/5e21d0b517434b7f33588ff76c177c5a167858cc38ef740608898cd329f2/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e829529fcaa09937189178115c49c504e69289abd39967cd8a4c215761373394", size = 1894220, upload-time = "2025-09-08T23:07:57.172Z" },
+    { url = "https://files.pythonhosted.org/packages/03/f2/44913a6ff6941905efc24a1acf3d3cb6146b636c546c7406c38c49c403d4/pyzmq-27.1.0-cp311-cp311-win32.whl", hash = "sha256:6df079c47d5902af6db298ec92151db82ecb557af663098b92f2508c398bb54f", size = 567155, upload-time = "2025-09-08T23:07:59.05Z" },
+    { url = "https://files.pythonhosted.org/packages/23/6d/d8d92a0eb270a925c9b4dd039c0b4dc10abc2fcbc48331788824ef113935/pyzmq-27.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:190cbf120fbc0fc4957b56866830def56628934a9d112aec0e2507aa6a032b97", size = 633428, upload-time = "2025-09-08T23:08:00.663Z" },
+    { url = "https://files.pythonhosted.org/packages/ae/14/01afebc96c5abbbd713ecfc7469cfb1bc801c819a74ed5c9fad9a48801cb/pyzmq-27.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:eca6b47df11a132d1745eb3b5b5e557a7dae2c303277aa0e69c6ba91b8736e07", size = 559497, upload-time = "2025-09-08T23:08:02.15Z" },
+    { url = "https://files.pythonhosted.org/packages/92/e7/038aab64a946d535901103da16b953c8c9cc9c961dadcbf3609ed6428d23/pyzmq-27.1.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:452631b640340c928fa343801b0d07eb0c3789a5ffa843f6e1a9cee0ba4eb4fc", size = 1306279, upload-time = "2025-09-08T23:08:03.807Z" },
+    { url = "https://files.pythonhosted.org/packages/e8/5e/c3c49fdd0f535ef45eefcc16934648e9e59dace4a37ee88fc53f6cd8e641/pyzmq-27.1.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1c179799b118e554b66da67d88ed66cd37a169f1f23b5d9f0a231b4e8d44a113", size = 895645, upload-time = "2025-09-08T23:08:05.301Z" },
+    { url = "https://files.pythonhosted.org/packages/f8/e5/b0b2504cb4e903a74dcf1ebae157f9e20ebb6ea76095f6cfffea28c42ecd/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3837439b7f99e60312f0c926a6ad437b067356dc2bc2ec96eb395fd0fe804233", size = 652574, upload-time = "2025-09-08T23:08:06.828Z" },
+    { url = "https://files.pythonhosted.org/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31", size = 840995, upload-time = "2025-09-08T23:08:08.396Z" },
+    { url = "https://files.pythonhosted.org/packages/c2/bb/b79798ca177b9eb0825b4c9998c6af8cd2a7f15a6a1a4272c1d1a21d382f/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0de3028d69d4cdc475bfe47a6128eb38d8bc0e8f4d69646adfbcd840facbac28", size = 1642070, upload-time = "2025-09-08T23:08:09.989Z" },
+    { url = "https://files.pythonhosted.org/packages/9c/80/2df2e7977c4ede24c79ae39dcef3899bfc5f34d1ca7a5b24f182c9b7a9ca/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf44a7763aea9298c0aa7dbf859f87ed7012de8bda0f3977b6fb1d96745df856", size = 2021121, upload-time = "2025-09-08T23:08:11.907Z" },
+    { url = "https://files.pythonhosted.org/packages/46/bd/2d45ad24f5f5ae7e8d01525eb76786fa7557136555cac7d929880519e33a/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f30f395a9e6fbca195400ce833c731e7b64c3919aa481af4d88c3759e0cb7496", size = 1878550, upload-time = "2025-09-08T23:08:13.513Z" },
+    { url = "https://files.pythonhosted.org/packages/e6/2f/104c0a3c778d7c2ab8190e9db4f62f0b6957b53c9d87db77c284b69f33ea/pyzmq-27.1.0-cp312-abi3-win32.whl", hash = "sha256:250e5436a4ba13885494412b3da5d518cd0d3a278a1ae640e113c073a5f88edd", size = 559184, upload-time = "2025-09-08T23:08:15.163Z" },
+    { url = "https://files.pythonhosted.org/packages/fc/7f/a21b20d577e4100c6a41795842028235998a643b1ad406a6d4163ea8f53e/pyzmq-27.1.0-cp312-abi3-win_amd64.whl", hash = "sha256:9ce490cf1d2ca2ad84733aa1d69ce6855372cb5ce9223802450c9b2a7cba0ccf", size = 619480, upload-time = "2025-09-08T23:08:17.192Z" },
+    { url = "https://files.pythonhosted.org/packages/78/c2/c012beae5f76b72f007a9e91ee9401cb88c51d0f83c6257a03e785c81cc2/pyzmq-27.1.0-cp312-abi3-win_arm64.whl", hash = "sha256:75a2f36223f0d535a0c919e23615fc85a1e23b71f40c7eb43d7b1dedb4d8f15f", size = 552993, upload-time = "2025-09-08T23:08:18.926Z" },
+    { url = "https://files.pythonhosted.org/packages/4c/c6/c4dcdecdbaa70969ee1fdced6d7b8f60cfabe64d25361f27ac4665a70620/pyzmq-27.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:18770c8d3563715387139060d37859c02ce40718d1faf299abddcdcc6a649066", size = 836265, upload-time = "2025-09-08T23:09:49.376Z" },
+    { url = "https://files.pythonhosted.org/packages/3e/79/f38c92eeaeb03a2ccc2ba9866f0439593bb08c5e3b714ac1d553e5c96e25/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:ac25465d42f92e990f8d8b0546b01c391ad431c3bf447683fdc40565941d0604", size = 800208, upload-time = "2025-09-08T23:09:51.073Z" },
+    { url = "https://files.pythonhosted.org/packages/49/0e/3f0d0d335c6b3abb9b7b723776d0b21fa7f3a6c819a0db6097059aada160/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53b40f8ae006f2734ee7608d59ed661419f087521edbfc2149c3932e9c14808c", size = 567747, upload-time = "2025-09-08T23:09:52.698Z" },
+    { url = "https://files.pythonhosted.org/packages/a1/cf/f2b3784d536250ffd4be70e049f3b60981235d70c6e8ce7e3ef21e1adb25/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f605d884e7c8be8fe1aa94e0a783bf3f591b84c24e4bc4f3e7564c82ac25e271", size = 747371, upload-time = "2025-09-08T23:09:54.563Z" },
+    { url = "https://files.pythonhosted.org/packages/01/1b/5dbe84eefc86f48473947e2f41711aded97eecef1231f4558f1f02713c12/pyzmq-27.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c9f7f6e13dff2e44a6afeaf2cf54cee5929ad64afaf4d40b50f93c58fc687355", size = 544862, upload-time = "2025-09-08T23:09:56.509Z" },
+]
+
 [[package]]
 name = "qdrant-client"
 version = "1.9.0"
```
```diff
@@ -5387,6 +5601,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
 ]

+[[package]]
+name = "simple-websocket"
+version = "1.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "wsproto" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b0/d4/bfa032f961103eba93de583b161f0e6a5b63cebb8f2c7d0c6e6efe1e3d2e/simple_websocket-1.1.0.tar.gz", hash = "sha256:7939234e7aa067c534abdab3a9ed933ec9ce4691b0713c78acb195560aa52ae4", size = 17300, upload-time = "2024-10-10T22:39:31.412Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/52/59/0782e51887ac6b07ffd1570e0364cf901ebc36345fea669969d2084baebb/simple_websocket-1.1.0-py3-none-any.whl", hash = "sha256:4af6069630a38ed6c561010f0e11a5bc0d4ca569b36306eb257cd9a192497c8c", size = 13842, upload-time = "2024-10-10T22:39:29.645Z" },
+]
+
 [[package]]
 name = "six"
 version = "1.17.0"
```
```diff
@@ -6794,6 +7020,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594, upload-time = "2025-01-14T10:35:44.018Z" },
 ]

+[[package]]
+name = "wsproto"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "h11" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c9/4a/44d3c295350d776427904d73c189e10aeae66d7f555bb2feee16d1e4ba5a/wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065", size = 53425, upload-time = "2022-08-23T19:58:21.447Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/78/58/e860788190eba3bcce367f74d29c4675466ce8dddfba85f7827588416f01/wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736", size = 24226, upload-time = "2022-08-23T19:58:19.96Z" },
+]
+
 [[package]]
 name = "xinference-client"
 version = "1.2.2"
```
scripts/stress-test/README.md (new file, 521 lines)
# Dify Stress Test Suite

A high-performance stress test suite for Dify workflow execution using **Locust** - optimized for measuring Server-Sent Events (SSE) streaming performance.

## Key Metrics Tracked

The stress test focuses on four critical SSE performance indicators:

1. **Active SSE Connections** - Real-time count of open SSE connections
1. **New Connection Rate** - Connections per second (conn/sec)
1. **Time to First Event (TTFE)** - Latency until first SSE event arrives
1. **Event Throughput** - Events per second (events/sec)

## Features

- **True SSE Support**: Properly handles Server-Sent Events streaming without premature connection closure
- **Real-time Metrics**: Live reporting every 5 seconds during tests
- **Comprehensive Tracking**:
  - Active connection monitoring
  - Connection establishment rate
  - Event processing throughput
  - TTFE distribution analysis
- **Multiple Interfaces**:
  - Web UI for real-time monitoring (<http://localhost:8089>)
  - Headless mode with periodic console updates
- **Detailed Reports**: Final statistics with overall rates and averages
- **Easy Configuration**: Uses existing API key configuration from setup
## What Gets Measured

The stress test focuses on SSE streaming performance with these key metrics:

### Primary Endpoint: `/v1/workflows/run`

The suite exercises a single endpoint with comprehensive SSE metrics tracking:

- **Request Type**: POST request to workflow execution API
- **Response Type**: Server-Sent Events (SSE) stream
- **Payload**: Random questions from a configurable pool
- **Concurrency**: Configurable from 1 to 1000+ simultaneous users

### Key Performance Metrics

#### 1. **Active Connections**

- **What it measures**: Number of concurrent SSE connections open at any moment
- **Why it matters**: Shows the system's ability to handle parallel streams
- **Good values**: Should remain stable under load without drops

#### 2. **Connection Rate (conn/sec)**

- **What it measures**: How fast new SSE connections are established
- **Why it matters**: Indicates the system's ability to handle connection spikes
- **Good values**:
  - Light load: 5-10 conn/sec
  - Medium load: 20-50 conn/sec
  - Heavy load: 100+ conn/sec

#### 3. **Time to First Event (TTFE)**

- **What it measures**: Latency from request sent to first SSE event received
- **Why it matters**: Critical for user experience - faster TTFE means better perceived performance
- **Good values**:
  - Excellent: < 50ms
  - Good: 50-100ms
  - Acceptable: 100-500ms
  - Poor: > 500ms
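As a concrete illustration of how TTFE can be captured, the sketch below times the gap between sending the request and the first `data:` line on the stream. It uses plain `requests` with a placeholder token and assumed local address; it is not the suite's actual implementation:

```python
import time

import requests

start = time.perf_counter()
resp = requests.post(
    "http://localhost:5001/v1/workflows/run",  # assumed local API address
    headers={"Authorization": "Bearer <your-app-token>"},  # placeholder token
    json={"inputs": {"question": "What is Dify?"}, "response_mode": "streaming", "user": "bench"},
    stream=True,
)

ttfe_ms = None
events = 0
for line in resp.iter_lines():
    if line.startswith(b"data:"):
        if ttfe_ms is None:
            # Time to First Event: request sent -> first SSE data line received
            ttfe_ms = (time.perf_counter() - start) * 1000
        events += 1

if ttfe_ms is not None:
    print(f"TTFE: {ttfe_ms:.1f} ms, events received: {events}")
```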
#### 4. **Event Throughput (events/sec)**

- **What it measures**: Rate of SSE events being delivered across all connections
- **Why it matters**: Shows actual data delivery performance
- **Expected values**: Depends on workflow complexity and number of connections
  - Single connection: 10-20 events/sec
  - 10 connections: 50-100 events/sec
  - 100 connections: 200-500 events/sec

#### 5. **Request/Response Times**

- **P50 (Median)**: 50% of requests complete within this time
- **P95**: 95% of requests complete within this time
- **P99**: 99% of requests complete within this time
- **Min/Max**: Best and worst case response times
## Prerequisites
|
||||
|
||||
1. **Dependencies are automatically installed** when running setup:
|
||||
|
||||
- Locust (load testing framework)
|
||||
- sseclient-py (SSE client library)
|
||||
|
||||
1. **Complete Dify setup**:
|
||||
|
||||
```bash
|
||||
# Run the complete setup
|
||||
python scripts/stress-test/setup_all.py
|
||||
```
|
||||
|
||||
1. **Ensure services are running**:
|
||||
|
||||
**IMPORTANT**: For accurate stress testing, run the API server with Gunicorn in production mode:
|
||||
|
||||
```bash
|
||||
# Run from the api directory
|
||||
cd api
|
||||
uv run gunicorn \
|
||||
--bind 0.0.0.0:5001 \
|
||||
--workers 4 \
|
||||
--worker-class gevent \
|
||||
--timeout 120 \
|
||||
--keep-alive 5 \
|
||||
--log-level info \
|
||||
--access-logfile - \
|
||||
--error-logfile - \
|
||||
app:app
|
||||
```
|
||||
|
||||
**Configuration options explained**:
|
||||
|
||||
- `--workers 4`: Number of worker processes (adjust based on CPU cores)
|
||||
- `--worker-class gevent`: Async worker for handling concurrent connections
|
||||
- `--timeout 120`: Worker timeout for long-running requests
|
||||
- `--keep-alive 5`: Keep connections alive for SSE streaming
|
||||
|
||||
**NOT RECOMMENDED for stress testing**:
|
||||
|
||||
```bash
|
||||
# Debug mode - DO NOT use for stress testing (slow performance)
|
||||
./dev/start-api # This runs Flask in debug mode with single-threaded execution
|
||||
```
|
||||
|
||||
**Also start the Mock OpenAI server**:
|
||||
|
||||
```bash
|
||||
python scripts/stress-test/setup/mock_openai_server.py
|
||||
```
|
||||
|
||||
## Running the Stress Test
|
||||
|
||||
```bash
|
||||
# Run with default configuration (headless mode)
|
||||
./scripts/stress-test/run_locust_stress_test.sh
|
||||
|
||||
# Or run directly with uv
|
||||
uv run --project api python -m locust -f scripts/stress-test/sse_benchmark.py --host http://localhost:5001
|
||||
|
||||
# Run with Web UI (access at http://localhost:8089)
|
||||
uv run --project api python -m locust -f scripts/stress-test/sse_benchmark.py --host http://localhost:5001 --web-port 8089
|
||||
```
|
||||
|
||||
The script will:
|
||||
|
||||
1. Validate that all required services are running
|
||||
1. Check API token availability
|
||||
1. Execute the Locust stress test with SSE support
|
||||
1. Generate comprehensive reports in the `reports/` directory
|
||||
|
||||
## Configuration
|
||||
|
||||
The stress test configuration is in `locust.conf`:
|
||||
|
||||
```ini
|
||||
users = 10 # Number of concurrent users
|
||||
spawn-rate = 2 # Users spawned per second
|
||||
run-time = 1m # Test duration (30s, 5m, 1h)
|
||||
headless = true # Run without web UI
|
||||
```
|
||||
|
||||
### Custom Question Sets
|
||||
|
||||
Modify the questions list in `sse_benchmark.py`:
|
||||
|
||||
```python
|
||||
self.questions = [
|
||||
"Your custom question 1",
|
||||
"Your custom question 2",
|
||||
# Add more questions...
|
||||
]
|
||||
```
|
||||
|
||||
## Understanding the Results
|
||||
|
||||
### Report Structure
|
||||
|
||||
After running the stress test, you'll find these files in the `reports/` directory:
|
||||
|
||||
- `locust_summary_YYYYMMDD_HHMMSS.txt` - Complete console output with metrics
|
||||
- `locust_report_YYYYMMDD_HHMMSS.html` - Interactive HTML report with charts
|
||||
- `locust_YYYYMMDD_HHMMSS_stats.csv` - CSV with detailed statistics
|
||||
- `locust_YYYYMMDD_HHMMSS_stats_history.csv` - Time-series data
|
||||
|
||||
### Key Metrics
|
||||
|
||||
**Requests Per Second (RPS)**:
|
||||
|
||||
- **Excellent**: > 50 RPS
|
||||
- **Good**: 20-50 RPS
|
||||
- **Acceptable**: 10-20 RPS
|
||||
- **Needs Improvement**: < 10 RPS
|
||||
|
||||
**Response Time Percentiles**:
|
||||
|
||||
- **P50 (Median)**: 50% of requests complete within this time
|
||||
- **P95**: 95% of requests complete within this time
|
||||
- **P99**: 99% of requests complete within this time
|
||||
|
||||
**Success Rate**:
|
||||
|
||||
- Should be > 99% for production readiness
|
||||
- Lower rates indicate errors or timeouts
|
||||
|
||||
### Example Output
|
||||
|
||||
```text
|
||||
============================================================
|
||||
DIFY SSE STRESS TEST
|
||||
============================================================
|
||||
|
||||
[2025-09-12 15:45:44,468] Starting test run with 10 users at 2 users/sec
|
||||
|
||||
============================================================
|
||||
SSE Metrics | Active: 8 | Total Conn: 142 | Events: 2841
|
||||
Rates: 2.4 conn/s | 47.3 events/s | TTFE: 43ms
|
||||
============================================================
|
||||
|
||||
Type Name # reqs # fails | Avg Min Max Med | req/s failures/s
|
||||
---------|------------------------------|--------|--------|--------|--------|--------|--------|--------|-----------
|
||||
POST /v1/workflows/run 142 0(0.00%) | 41 18 192 38 | 2.37 0.00
|
||||
---------|------------------------------|--------|--------|--------|--------|--------|--------|--------|-----------
|
||||
Aggregated 142 0(0.00%) | 41 18 192 38 | 2.37 0.00
|
||||
|
||||
============================================================
|
||||
FINAL RESULTS
|
||||
============================================================
|
||||
Total Connections: 142
|
||||
Total Events: 2841
|
||||
Average TTFE: 43 ms
|
||||
============================================================
|
||||
```
|
||||
|
||||
### How to Read the Results
|
||||
|
||||
**Live SSE Metrics Box (Updates every 10 seconds):**
|
||||
|
||||
```text
|
||||
SSE Metrics | Active: 8 | Total Conn: 142 | Events: 2841
|
||||
Rates: 2.4 conn/s | 47.3 events/s | TTFE: 43ms
|
||||
```
|
||||
|
||||
- **Active**: Current number of open SSE connections
|
||||
- **Total Conn**: Cumulative connections established
|
||||
- **Events**: Total SSE events received
|
||||
- **conn/s**: Connection establishment rate
|
||||
- **events/s**: Event delivery rate
|
||||
- **TTFE**: Average time to first event
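
Under the hood, numbers like these are simple shared tallies updated as connections open, stream, and close. The following is an illustrative sketch of that bookkeeping; the class and method names are hypothetical, not the actual `sse_benchmark.py` internals.

```python
import time
from threading import Lock


class SSECounters:
    """Illustrative shared tallies behind a live metrics box (not the real implementation)."""

    def __init__(self) -> None:
        self._lock = Lock()
        self.active = 0             # currently open SSE connections
        self.total_connections = 0  # cumulative connections established
        self.total_events = 0       # cumulative SSE events received
        self.ttfe_ms: list[float] = []

    def on_connect(self) -> None:
        with self._lock:
            self.active += 1
            self.total_connections += 1

    def on_first_event(self, started_at: float) -> None:
        # started_at is a time.perf_counter() timestamp taken at request send
        with self._lock:
            self.ttfe_ms.append((time.perf_counter() - started_at) * 1000)

    def on_event(self) -> None:
        with self._lock:
            self.total_events += 1

    def on_disconnect(self) -> None:
        with self._lock:
            self.active -= 1
```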

**Standard Locust Table:**

```text
Type     Name                           # reqs  # fails |    Avg     Min     Max     Med |  req/s
POST     /v1/workflows/run                  142 0(0.00%) |     41      18     192      38 |   2.37
```

- **Type**: Always POST for our SSE requests
- **Name**: The API endpoint being tested
- **# reqs**: Total requests made
- **# fails**: Failed requests (should be 0)
- **Avg/Min/Max/Med**: Response time statistics (ms)
- **req/s**: Request throughput

**Performance Targets:**

✅ **Good Performance**:

- Zero failures (0.00%)
- TTFE < 100ms
- Stable active connections
- Consistent event throughput

⚠️ **Warning Signs**:

- Failures > 1%
- TTFE > 500ms
- Dropping active connections
- Declining event rate over time

## Test Scenarios

### Light Load

```yaml
concurrency: 10
iterations: 100
```

### Normal Load

```yaml
concurrency: 100
iterations: 1000
```

### Heavy Load

```yaml
concurrency: 500
iterations: 5000
```

### Stress Test

```yaml
concurrency: 1000
iterations: 10000
```

## Performance Tuning

### API Server Optimization

**Gunicorn Tuning for Different Load Levels**:

```bash
# Light load (10-50 concurrent users)
uv run gunicorn --bind 0.0.0.0:5001 --workers 2 --worker-class gevent app:app

# Medium load (50-200 concurrent users)
uv run gunicorn --bind 0.0.0.0:5001 --workers 4 --worker-class gevent --worker-connections 1000 app:app

# Heavy load (200-1000 concurrent users)
uv run gunicorn --bind 0.0.0.0:5001 --workers 8 --worker-class gevent --worker-connections 2000 --max-requests 1000 app:app
```

**Worker calculation formula** (see the sketch below):

- Workers = (2 × CPU cores) + 1
- For SSE/WebSocket: Use gevent worker class
- For CPU-bound tasks: Use sync workers
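
A quick way to apply the formula on the current machine; a throwaway sketch, not part of the suite:

```python
import os


def recommended_workers() -> int:
    """(2 x CPU cores) + 1, the rule of thumb above."""
    return 2 * (os.cpu_count() or 1) + 1


print(recommended_workers())  # e.g. 9 on a 4-core machine
```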

### Database Optimization

**PostgreSQL Connection Pool Tuning**:

For high-concurrency stress testing, increase the PostgreSQL max connections in `docker/middleware.env`:

```bash
# Edit docker/middleware.env
POSTGRES_MAX_CONNECTIONS=200  # Default is 100

# Recommended values for different load levels:
# Light load (10-50 users): 100 (default)
# Medium load (50-200 users): 200
# Heavy load (200-1000 users): 500
```

After changing, restart the PostgreSQL container:

```bash
docker compose -f docker/docker-compose.middleware.yaml down db
docker compose -f docker/docker-compose.middleware.yaml up -d db
```

**Note**: Each connection uses ~10MB of RAM. Ensure your database server has sufficient memory:

- 100 connections: ~1GB RAM
- 200 connections: ~2GB RAM
- 500 connections: ~5GB RAM

### System Optimizations

1. **Increase file descriptor limits**:

   ```bash
   ulimit -n 65536
   ```

1. **TCP tuning for high concurrency** (Linux):

   ```bash
   # Increase TCP buffer sizes
   sudo sysctl -w net.core.rmem_max=134217728
   sudo sysctl -w net.core.wmem_max=134217728

   # Enable TCP fast open
   sudo sysctl -w net.ipv4.tcp_fastopen=3
   ```

1. **macOS specific**:

   ```bash
   # Increase maximum connections
   sudo sysctl -w kern.ipc.somaxconn=2048
   ```

## Troubleshooting

### Common Issues

1. **"ModuleNotFoundError: No module named 'locust'"**:

   ```bash
   # Dependencies are installed automatically, but if needed:
   uv --project api add --dev locust sseclient-py
   ```

1. **"API key configuration not found"**:

   ```bash
   # Run setup
   python scripts/stress-test/setup_all.py
   ```

1. **Services not running**:

   ```bash
   # Start Dify API with Gunicorn (production mode)
   cd api
   uv run gunicorn --bind 0.0.0.0:5001 --workers 4 --worker-class gevent app:app

   # Start Mock OpenAI server
   python scripts/stress-test/setup/mock_openai_server.py
   ```

1. **High error rate**:

   - Reduce concurrency level
   - Check system resources (CPU, memory)
   - Review API server logs for errors
   - Increase timeout values if needed

1. **Permission denied running script**:

   ```bash
   chmod +x scripts/stress-test/run_locust_stress_test.sh
   ```
## Advanced Usage

### Running Multiple Iterations

```bash
# Run stress test 3 times with 60-second intervals
for i in {1..3}; do
  echo "Run $i of 3"
  ./run_locust_stress_test.sh
  sleep 60
done
```

### Custom Locust Options

Run Locust directly with custom options:

```bash
# With specific user count and spawn rate
uv run --project api python -m locust -f scripts/stress-test/sse_benchmark.py \
  --host http://localhost:5001 --users 50 --spawn-rate 5

# Generate CSV reports
uv run --project api python -m locust -f scripts/stress-test/sse_benchmark.py \
  --host http://localhost:5001 --csv reports/results

# Run for specific duration
uv run --project api python -m locust -f scripts/stress-test/sse_benchmark.py \
  --host http://localhost:5001 --run-time 5m --headless
```

### Comparing Results

```bash
# Compare the most recent stress test summaries
ls -la reports/locust_summary_*.txt | tail -5
```
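
For a more quantitative comparison, the `Aggregated` row of each run's stats CSV can be diffed directly. A small sketch using the standard Locust CSV columns (the hypothetical `compare_runs.py` below is not part of the suite):

```python
import csv
import sys


def aggregated_row(path: str) -> dict[str, str]:
    """Return the 'Aggregated' row from a Locust *_stats.csv file."""
    with open(path, newline="") as f:
        for row in csv.DictReader(f):
            if row.get("Name") == "Aggregated":
                return row
    raise ValueError(f"no Aggregated row found in {path}")


# Usage: python compare_runs.py reports/locust_*_stats.csv
for path in sys.argv[1:]:
    row = aggregated_row(path)
    print(
        f"{path}: {row.get('Requests/s', 'N/A')} RPS, "
        f"median {row.get('Median Response Time', 'N/A')} ms, "
        f"{row.get('Failure Count', '0')} failures"
    )
```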

## Interpreting Performance Issues

### High Response Times

Possible causes:

- Database query performance
- External API latency
- Insufficient server resources
- Network congestion

### Low Throughput (RPS < 10)

Check for:

- CPU bottlenecks
- Memory constraints
- Database connection pooling
- API rate limiting

### High Error Rate

Investigate:

- Server error logs
- Resource exhaustion
- Timeout configurations
- Connection limits

## Why Locust?

Locust was chosen over Drill for this stress test because:

1. **Proper SSE Support**: Correctly handles streaming responses without premature closure
1. **Custom Metrics**: Can track SSE-specific metrics like TTFE and stream duration
1. **Web UI**: Real-time monitoring and control via web interface
1. **Python Integration**: Seamlessly integrates with existing Python setup code
1. **Extensibility**: Easy to customize for specific testing scenarios

## Contributing

To improve the stress test suite:

1. Edit `locust.conf` for configuration changes
1. Modify `run_locust_stress_test.sh` for workflow improvements
1. Update question sets for better coverage
1. Add new metrics or analysis features
90
scripts/stress-test/cleanup.py
Executable file
@@ -0,0 +1,90 @@
#!/usr/bin/env python3

import shutil
import sys
from pathlib import Path

from common import Logger


def cleanup() -> None:
    """Clean up all configuration files and reports created during setup and stress testing."""

    log = Logger("Cleanup")
    log.header("Stress Test Cleanup")

    config_dir = Path(__file__).parent / "setup" / "config"
    reports_dir = Path(__file__).parent / "reports"

    dirs_to_clean = []
    if config_dir.exists():
        dirs_to_clean.append(config_dir)
    if reports_dir.exists():
        dirs_to_clean.append(reports_dir)

    if not dirs_to_clean:
        log.success("No directories to clean. Everything is already clean.")
        return

    log.info("Cleaning up stress test data...")
    log.info("This will remove:")
    for dir_path in dirs_to_clean:
        log.list_item(str(dir_path))

    # List files that will be deleted
    log.separator()
    if config_dir.exists():
        config_files = list(config_dir.glob("*.json"))
        if config_files:
            log.info("Config files to be removed:")
            for file in config_files:
                log.list_item(file.name)

    if reports_dir.exists():
        report_files = list(reports_dir.glob("*"))
        if report_files:
            log.info("Report files to be removed:")
            for file in report_files:
                log.list_item(file.name)

    # Ask for confirmation if running interactively
    if sys.stdin.isatty():
        log.separator()
        log.warning("This action cannot be undone!")
        confirmation = input(
            "Are you sure you want to remove all config and report files? (yes/no): "
        )

        if confirmation.lower() not in ["yes", "y"]:
            log.error("Cleanup cancelled.")
            return

    try:
        # Remove directories and all their contents
        for dir_path in dirs_to_clean:
            shutil.rmtree(dir_path)
            log.success(f"{dir_path.name} directory removed successfully!")

        log.separator()
        log.info("To run the setup again, execute:")
        log.list_item("python setup_all.py")
        log.info("Or run scripts individually in this order:")
        log.list_item("python setup/mock_openai_server.py (in a separate terminal)")
        log.list_item("python setup/setup_admin.py")
        log.list_item("python setup/login_admin.py")
        log.list_item("python setup/install_openai_plugin.py")
        log.list_item("python setup/configure_openai_plugin.py")
        log.list_item("python setup/import_workflow_app.py")
        log.list_item("python setup/create_api_key.py")
        log.list_item("python setup/publish_workflow.py")
        log.list_item("python setup/run_workflow.py")

    except PermissionError as e:
        log.error(f"Permission denied: {e}")
        log.info("Try running with appropriate permissions.")
    except Exception as e:
        log.error(f"An error occurred during cleanup: {e}")


if __name__ == "__main__":
    cleanup()
6
scripts/stress-test/common/__init__.py
Normal file
@@ -0,0 +1,6 @@
"""Common utilities for Dify benchmark suite."""

from .config_helper import config_helper
from .logger_helper import Logger, ProgressLogger

__all__ = ["config_helper", "Logger", "ProgressLogger"]
240
scripts/stress-test/common/config_helper.py
Normal file
@@ -0,0 +1,240 @@
#!/usr/bin/env python3

import json
from pathlib import Path
from typing import Any


class ConfigHelper:
    """Helper class for reading and writing configuration files."""

    def __init__(self, base_dir: Path | None = None):
        """Initialize ConfigHelper with base directory.

        Args:
            base_dir: Base directory for config files. If None, uses setup/config
        """
        if base_dir is None:
            # Default to config directory in setup folder
            base_dir = Path(__file__).parent.parent / "setup" / "config"
        self.base_dir = base_dir
        self.state_file = "stress_test_state.json"

    def ensure_config_dir(self) -> None:
        """Ensure the config directory exists."""
        self.base_dir.mkdir(exist_ok=True, parents=True)

    def get_config_path(self, filename: str) -> Path:
        """Get the full path for a config file.

        Args:
            filename: Name of the config file (e.g., 'admin_config.json')

        Returns:
            Full path to the config file
        """
        if not filename.endswith(".json"):
            filename += ".json"
        return self.base_dir / filename

    def read_config(self, filename: str) -> dict[str, Any] | None:
        """Read a configuration file.

        DEPRECATED: Use read_state() or get_state_section() for new code.
        This method provides backward compatibility.

        Args:
            filename: Name of the config file to read

        Returns:
            Dictionary containing config data, or None if file doesn't exist
        """
        # Provide backward compatibility for old config names
        if filename in ["admin_config", "token_config", "app_config", "api_key_config"]:
            section_map = {
                "admin_config": "admin",
                "token_config": "auth",
                "app_config": "app",
                "api_key_config": "api_key",
            }
            return self.get_state_section(section_map[filename])

        config_path = self.get_config_path(filename)

        if not config_path.exists():
            return None

        try:
            with open(config_path, "r") as f:
                return json.load(f)
        except (json.JSONDecodeError, IOError) as e:
            print(f"❌ Error reading {filename}: {e}")
            return None

    def write_config(self, filename: str, data: dict[str, Any]) -> bool:
        """Write data to a configuration file.

        DEPRECATED: Use write_state() or update_state_section() for new code.
        This method provides backward compatibility.

        Args:
            filename: Name of the config file to write
            data: Dictionary containing data to save

        Returns:
            True if successful, False otherwise
        """
        # Provide backward compatibility for old config names
        if filename in ["admin_config", "token_config", "app_config", "api_key_config"]:
            section_map = {
                "admin_config": "admin",
                "token_config": "auth",
                "app_config": "app",
                "api_key_config": "api_key",
            }
            return self.update_state_section(section_map[filename], data)

        self.ensure_config_dir()
        config_path = self.get_config_path(filename)

        try:
            with open(config_path, "w") as f:
                json.dump(data, f, indent=2)
            return True
        except IOError as e:
            print(f"❌ Error writing {filename}: {e}")
            return False

    def config_exists(self, filename: str) -> bool:
        """Check if a config file exists.

        Args:
            filename: Name of the config file to check

        Returns:
            True if file exists, False otherwise
        """
        return self.get_config_path(filename).exists()

    def delete_config(self, filename: str) -> bool:
        """Delete a configuration file.

        Args:
            filename: Name of the config file to delete

        Returns:
            True if successful, False otherwise
        """
        config_path = self.get_config_path(filename)

        if not config_path.exists():
            return True  # Already doesn't exist

        try:
            config_path.unlink()
            return True
        except IOError as e:
            print(f"❌ Error deleting {filename}: {e}")
            return False

    def read_state(self) -> dict[str, Any] | None:
        """Read the entire stress test state.

        Returns:
            Dictionary containing all state data, or None if file doesn't exist
        """
        state_path = self.get_config_path(self.state_file)
        if not state_path.exists():
            return None

        try:
            with open(state_path, "r") as f:
                return json.load(f)
        except (json.JSONDecodeError, IOError) as e:
            print(f"❌ Error reading {self.state_file}: {e}")
            return None

    def write_state(self, data: dict[str, Any]) -> bool:
        """Write the entire stress test state.

        Args:
            data: Dictionary containing all state data to save

        Returns:
            True if successful, False otherwise
        """
        self.ensure_config_dir()
        state_path = self.get_config_path(self.state_file)

        try:
            with open(state_path, "w") as f:
                json.dump(data, f, indent=2)
            return True
        except IOError as e:
            print(f"❌ Error writing {self.state_file}: {e}")
            return False

    def update_state_section(self, section: str, data: dict[str, Any]) -> bool:
        """Update a specific section of the stress test state.

        Args:
            section: Name of the section to update (e.g., 'admin', 'auth', 'app', 'api_key')
            data: Dictionary containing section data to save

        Returns:
            True if successful, False otherwise
        """
        state = self.read_state() or {}
        state[section] = data
        return self.write_state(state)

    def get_state_section(self, section: str) -> dict[str, Any] | None:
        """Get a specific section from the stress test state.

        Args:
            section: Name of the section to get (e.g., 'admin', 'auth', 'app', 'api_key')

        Returns:
            Dictionary containing section data, or None if not found
        """
        state = self.read_state()
        if state:
            return state.get(section)
        return None

    def get_token(self) -> str | None:
        """Get the access token from auth section.

        Returns:
            Access token string or None if not found
        """
        auth = self.get_state_section("auth")
        if auth:
            return auth.get("access_token")
        return None

    def get_app_id(self) -> str | None:
        """Get the app ID from app section.

        Returns:
            App ID string or None if not found
        """
        app = self.get_state_section("app")
        if app:
            return app.get("app_id")
        return None

    def get_api_key(self) -> str | None:
        """Get the API key token from api_key section.

        Returns:
            API key token string or None if not found
        """
        api_key = self.get_state_section("api_key")
        if api_key:
            return api_key.get("token")
        return None


# Create a default instance for convenience
config_helper = ConfigHelper()
220
scripts/stress-test/common/logger_helper.py
Normal file
@@ -0,0 +1,220 @@
#!/usr/bin/env python3

import sys
import time
from enum import Enum


class LogLevel(Enum):
    """Log levels with associated colors and symbols."""

    DEBUG = ("🔍", "\033[90m")  # Gray
    INFO = ("ℹ️ ", "\033[94m")  # Blue
    SUCCESS = ("✅", "\033[92m")  # Green
    WARNING = ("⚠️ ", "\033[93m")  # Yellow
    ERROR = ("❌", "\033[91m")  # Red
    STEP = ("🚀", "\033[96m")  # Cyan
    PROGRESS = ("📋", "\033[95m")  # Magenta


class Logger:
    """Logger class for formatted console output."""

    def __init__(self, name: str | None = None, use_colors: bool = True):
        """Initialize logger.

        Args:
            name: Optional name for the logger (e.g., script name)
            use_colors: Whether to use ANSI color codes
        """
        self.name = name
        self.use_colors = use_colors and sys.stdout.isatty()
        self._reset_color = "\033[0m" if self.use_colors else ""

    def _format_message(self, level: LogLevel, message: str, indent: int = 0) -> str:
        """Format a log message with level, color, and indentation.

        Args:
            level: Log level
            message: Message to log
            indent: Number of spaces to indent

        Returns:
            Formatted message string
        """
        symbol, color = level.value
        color = color if self.use_colors else ""
        reset = self._reset_color

        prefix = " " * indent

        if self.name and level in [LogLevel.STEP, LogLevel.ERROR]:
            return f"{prefix}{color}{symbol} [{self.name}] {message}{reset}"
        else:
            return f"{prefix}{color}{symbol} {message}{reset}"

    def debug(self, message: str, indent: int = 0) -> None:
        """Log debug message."""
        print(self._format_message(LogLevel.DEBUG, message, indent))

    def info(self, message: str, indent: int = 0) -> None:
        """Log info message."""
        print(self._format_message(LogLevel.INFO, message, indent))

    def success(self, message: str, indent: int = 0) -> None:
        """Log success message."""
        print(self._format_message(LogLevel.SUCCESS, message, indent))

    def warning(self, message: str, indent: int = 0) -> None:
        """Log warning message."""
        print(self._format_message(LogLevel.WARNING, message, indent))

    def error(self, message: str, indent: int = 0) -> None:
        """Log error message."""
        print(self._format_message(LogLevel.ERROR, message, indent), file=sys.stderr)

    def step(self, message: str, indent: int = 0) -> None:
        """Log a step in a process."""
        print(self._format_message(LogLevel.STEP, message, indent))

    def progress(self, message: str, indent: int = 0) -> None:
        """Log progress information."""
        print(self._format_message(LogLevel.PROGRESS, message, indent))

    def separator(self, char: str = "-", length: int = 60) -> None:
        """Print a separator line."""
        print(char * length)

    def header(self, title: str, width: int = 60) -> None:
        """Print a formatted header."""
        if self.use_colors:
            print(f"\n\033[1m{'=' * width}\033[0m")  # Bold
            print(f"\033[1m{title.center(width)}\033[0m")
            print(f"\033[1m{'=' * width}\033[0m\n")
        else:
            print(f"\n{'=' * width}")
            print(title.center(width))
            print(f"{'=' * width}\n")

    def box(self, title: str, width: int = 60) -> None:
        """Print a title in a box."""
        border = "═" * (width - 2)
        if self.use_colors:
            print(f"\033[1m╔{border}╗\033[0m")
            print(f"\033[1m║{title.center(width - 2)}║\033[0m")
            print(f"\033[1m╚{border}╝\033[0m")
        else:
            print(f"╔{border}╗")
            print(f"║{title.center(width - 2)}║")
            print(f"╚{border}╝")

    def list_item(self, item: str, indent: int = 2) -> None:
        """Print a list item."""
        prefix = " " * indent
        print(f"{prefix}• {item}")

    def key_value(self, key: str, value: str, indent: int = 2) -> None:
        """Print a key-value pair."""
        prefix = " " * indent
        if self.use_colors:
            print(f"{prefix}\033[1m{key}:\033[0m {value}")
        else:
            print(f"{prefix}{key}: {value}")

    def spinner_start(self, message: str) -> None:
        """Start a spinner (simple implementation)."""
        sys.stdout.write(f"\r{message}... ")
        sys.stdout.flush()

    def spinner_stop(self, success: bool = True, message: str | None = None) -> None:
        """Stop the spinner and show result."""
        if success:
            symbol = "✅" if message else "Done"
            sys.stdout.write(f"\r{symbol} {message or ''}\n")
        else:
            symbol = "❌" if message else "Failed"
            sys.stdout.write(f"\r{symbol} {message or ''}\n")
        sys.stdout.flush()


class ProgressLogger:
    """Logger for tracking progress through multiple steps."""

    def __init__(self, total_steps: int, logger: Logger | None = None):
        """Initialize progress logger.

        Args:
            total_steps: Total number of steps
            logger: Logger instance to use (creates new if None)
        """
        self.total_steps = total_steps
        self.current_step = 0
        self.logger = logger or Logger()
        self.start_time = time.time()

    def next_step(self, description: str) -> None:
        """Move to next step and log it."""
        self.current_step += 1
        elapsed = time.time() - self.start_time

        if self.logger.use_colors:
            progress_bar = self._create_progress_bar()
            print(
                f"\n\033[1m[Step {self.current_step}/{self.total_steps}]\033[0m {progress_bar}"
            )
            self.logger.step(f"{description} (Elapsed: {elapsed:.1f}s)")
        else:
            print(f"\n[Step {self.current_step}/{self.total_steps}]")
            self.logger.step(f"{description} (Elapsed: {elapsed:.1f}s)")

    def _create_progress_bar(self, width: int = 20) -> str:
        """Create a simple progress bar."""
        filled = int(width * self.current_step / self.total_steps)
        bar = "█" * filled + "░" * (width - filled)
        percentage = int(100 * self.current_step / self.total_steps)
        return f"[{bar}] {percentage}%"

    def complete(self) -> None:
        """Mark progress as complete."""
        elapsed = time.time() - self.start_time
        self.logger.success(f"All steps completed! Total time: {elapsed:.1f}s")


# Create default logger instance
logger = Logger()


# Convenience functions using default logger
def debug(message: str, indent: int = 0) -> None:
    """Log debug message using default logger."""
    logger.debug(message, indent)


def info(message: str, indent: int = 0) -> None:
    """Log info message using default logger."""
    logger.info(message, indent)


def success(message: str, indent: int = 0) -> None:
    """Log success message using default logger."""
    logger.success(message, indent)


def warning(message: str, indent: int = 0) -> None:
    """Log warning message using default logger."""
    logger.warning(message, indent)


def error(message: str, indent: int = 0) -> None:
    """Log error message using default logger."""
    logger.error(message, indent)


def step(message: str, indent: int = 0) -> None:
    """Log step using default logger."""
    logger.step(message, indent)


def progress(message: str, indent: int = 0) -> None:
    """Log progress using default logger."""
    logger.progress(message, indent)
37
scripts/stress-test/locust.conf
Normal file
@@ -0,0 +1,37 @@
# Locust configuration file for Dify SSE benchmark

# Target host
host = http://localhost:5001

# Number of users to simulate
users = 10

# Spawn rate (users per second)
spawn-rate = 2

# Run time (use format like 30s, 5m, 1h)
run-time = 1m

# Locustfile to use
locustfile = scripts/stress-test/sse_benchmark.py

# Headless mode (no web UI)
headless = true

# Print stats in the console
print-stats = true

# Only print summary stats
only-summary = false

# Reset statistics after ramp-up
reset-stats = false

# Log level
loglevel = INFO

# CSV output (uncomment to enable)
# csv = reports/locust_results

# HTML report (uncomment to enable)
# html = reports/locust_report.html
202
scripts/stress-test/run_locust_stress_test.sh
Executable file
@@ -0,0 +1,202 @@
#!/bin/bash

# Run Dify SSE Stress Test using Locust

set -e

# Get the directory where this script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Go to project root first, then to script dir
PROJECT_ROOT="$( cd "${SCRIPT_DIR}/../.." && pwd )"
cd "${PROJECT_ROOT}"
STRESS_TEST_DIR="scripts/stress-test"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Configuration
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
REPORT_DIR="${STRESS_TEST_DIR}/reports"
CSV_PREFIX="${REPORT_DIR}/locust_${TIMESTAMP}"
HTML_REPORT="${REPORT_DIR}/locust_report_${TIMESTAMP}.html"
SUMMARY_REPORT="${REPORT_DIR}/locust_summary_${TIMESTAMP}.txt"

# Create reports directory if it doesn't exist
mkdir -p "${REPORT_DIR}"

echo -e "${BLUE}╔════════════════════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║            DIFY SSE WORKFLOW STRESS TEST (LOCUST)              ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════════════════════════════╝${NC}"
echo

# Check if services are running
echo -e "${YELLOW}Checking services...${NC}"

# Check Dify API
if curl -s -f http://localhost:5001/health > /dev/null 2>&1; then
    echo -e "${GREEN}✓ Dify API is running${NC}"

    # Warn if running in debug mode (check for werkzeug in process)
    if ps aux | grep -v grep | grep -q "werkzeug.*5001\|flask.*run.*5001"; then
        echo -e "${YELLOW}⚠ WARNING: API appears to be running in debug mode (Flask development server)${NC}"
        echo -e "${YELLOW}  This will give inaccurate benchmark results!${NC}"
        echo -e "${YELLOW}  For accurate benchmarking, restart with Gunicorn:${NC}"
        echo -e "${CYAN}  cd api && uv run gunicorn --bind 0.0.0.0:5001 --workers 4 --worker-class gevent app:app${NC}"
        echo
        echo -n "Continue anyway? (not recommended) [y/N]: "
        read -t 10 continue_debug || continue_debug="n"
        if [ "$continue_debug" != "y" ] && [ "$continue_debug" != "Y" ]; then
            echo -e "${RED}Benchmark cancelled. Please restart API with Gunicorn.${NC}"
            exit 1
        fi
    fi
else
    echo -e "${RED}✗ Dify API is not running on port 5001${NC}"
    echo -e "${YELLOW}  Start it with Gunicorn for accurate benchmarking:${NC}"
    echo -e "${CYAN}  cd api && uv run gunicorn --bind 0.0.0.0:5001 --workers 4 --worker-class gevent app:app${NC}"
    exit 1
fi

# Check Mock OpenAI server
if curl -s -f http://localhost:5004/v1/models > /dev/null 2>&1; then
    echo -e "${GREEN}✓ Mock OpenAI server is running${NC}"
else
    echo -e "${RED}✗ Mock OpenAI server is not running on port 5004${NC}"
    echo -e "${YELLOW}  Start it with: python scripts/stress-test/setup/mock_openai_server.py${NC}"
    exit 1
fi

# Check API token exists
if [ ! -f "${STRESS_TEST_DIR}/setup/config/stress_test_state.json" ]; then
    echo -e "${RED}✗ Stress test configuration not found${NC}"
    echo -e "${YELLOW}  Run setup first: python scripts/stress-test/setup_all.py${NC}"
    exit 1
fi

API_TOKEN=$(python3 -c "import json; state = json.load(open('${STRESS_TEST_DIR}/setup/config/stress_test_state.json')); print(state.get('api_key', {}).get('token', ''))" 2>/dev/null)
if [ -z "$API_TOKEN" ]; then
    echo -e "${RED}✗ Failed to read API token from stress test state${NC}"
    exit 1
fi
echo -e "${GREEN}✓ API token found: ${API_TOKEN:0:10}...${NC}"

echo
echo -e "${CYAN}═══════════════════════════════════════════════════════════════${NC}"
echo -e "${CYAN}                   STRESS TEST PARAMETERS                       ${NC}"
echo -e "${CYAN}═══════════════════════════════════════════════════════════════${NC}"

# Parse configuration
USERS=$(grep "^users" ${STRESS_TEST_DIR}/locust.conf | cut -d'=' -f2 | tr -d ' ')
SPAWN_RATE=$(grep "^spawn-rate" ${STRESS_TEST_DIR}/locust.conf | cut -d'=' -f2 | tr -d ' ')
RUN_TIME=$(grep "^run-time" ${STRESS_TEST_DIR}/locust.conf | cut -d'=' -f2 | tr -d ' ')

echo -e "  ${YELLOW}Users:${NC}       $USERS concurrent users"
echo -e "  ${YELLOW}Spawn Rate:${NC}  $SPAWN_RATE users/second"
echo -e "  ${YELLOW}Duration:${NC}    $RUN_TIME"
echo -e "  ${YELLOW}Mode:${NC}        SSE Streaming"
echo

# Ask user for run mode
echo -e "${YELLOW}Select run mode:${NC}"
echo "  1) Headless (CLI only) - Default"
echo "  2) Web UI (http://localhost:8089)"
echo -n "Choice [1]: "
read -t 10 choice || choice="1"
echo

# Use SSE stress test script
LOCUST_SCRIPT="${STRESS_TEST_DIR}/sse_benchmark.py"

# Prepare Locust command
if [ "$choice" = "2" ]; then
    echo -e "${BLUE}Starting Locust with Web UI...${NC}"
    echo -e "${YELLOW}Access the web interface at: ${CYAN}http://localhost:8089${NC}"
    echo

    # Run with web UI
    uv --project api run locust \
        -f ${LOCUST_SCRIPT} \
        --host http://localhost:5001 \
        --web-port 8089
else
    echo -e "${BLUE}Starting stress test in headless mode...${NC}"
    echo

    # Run in headless mode with CSV output
    uv --project api run locust \
        -f ${LOCUST_SCRIPT} \
        --host http://localhost:5001 \
        --users $USERS \
        --spawn-rate $SPAWN_RATE \
        --run-time $RUN_TIME \
        --headless \
        --print-stats \
        --csv=$CSV_PREFIX \
        --html=$HTML_REPORT \
        2>&1 | tee $SUMMARY_REPORT

    echo
    echo -e "${GREEN}═══════════════════════════════════════════════════════════════${NC}"
    echo -e "${GREEN}                    STRESS TEST COMPLETE                        ${NC}"
    echo -e "${GREEN}═══════════════════════════════════════════════════════════════${NC}"
    echo
    echo -e "${BLUE}Reports generated:${NC}"
    echo -e "  ${YELLOW}Summary:${NC}     $SUMMARY_REPORT"
    echo -e "  ${YELLOW}HTML Report:${NC} $HTML_REPORT"
    echo -e "  ${YELLOW}CSV Stats:${NC}   ${CSV_PREFIX}_stats.csv"
    echo -e "  ${YELLOW}CSV History:${NC} ${CSV_PREFIX}_stats_history.csv"
    echo
    echo -e "${CYAN}View HTML report:${NC}"
    echo "  open $HTML_REPORT      # macOS"
    echo "  xdg-open $HTML_REPORT  # Linux"
    echo

    # Parse and display key metrics
    echo -e "${CYAN}═══════════════════════════════════════════════════════════════${NC}"
    echo -e "${CYAN}                        KEY METRICS                             ${NC}"
    echo -e "${CYAN}═══════════════════════════════════════════════════════════════${NC}"

    if [ -f "${CSV_PREFIX}_stats.csv" ]; then
        python3 - <<EOF
import csv
import sys

csv_file = "${CSV_PREFIX}_stats.csv"

try:
    with open(csv_file, 'r') as f:
        reader = csv.DictReader(f)
        rows = list(reader)

    # Find the aggregated row
    for row in rows:
        if row.get('Name') == 'Aggregated':
            print(f"  Total Requests:   {row.get('Request Count', 'N/A')}")
            print(f"  Failure Rate:     {row.get('Failure Count', '0')} failures")
            print(f"  Median Response:  {row.get('Median Response Time', 'N/A')} ms")
            print(f"  95%ile Response:  {row.get('95%', 'N/A')} ms")
            print(f"  99%ile Response:  {row.get('99%', 'N/A')} ms")
            print(f"  RPS:              {row.get('Requests/s', 'N/A')}")
            break

    # Show SSE-specific metrics
    print()
    print("SSE Streaming Metrics:")
    for row in rows:
        if 'Time to First Event' in row.get('Name', ''):
            print(f"  Time to First Event: {row.get('Median Response Time', 'N/A')} ms (median)")
        elif 'Stream Duration' in row.get('Name', ''):
            print(f"  Stream Duration:     {row.get('Median Response Time', 'N/A')} ms (median)")

except Exception as e:
    print(f"Could not parse metrics: {e}")
EOF
    fi

    echo -e "${CYAN}═══════════════════════════════════════════════════════════════${NC}"
fi
108
scripts/stress-test/setup/configure_openai_plugin.py
Executable file
@@ -0,0 +1,108 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
from common import config_helper
from common import Logger


def configure_openai_plugin() -> None:
    """Configure OpenAI plugin with mock server credentials."""

    log = Logger("ConfigPlugin")
    log.header("Configuring OpenAI Plugin")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        log.info("Please run login_admin.py first to get access token")
        return

    log.step("Configuring OpenAI plugin with mock server...")

    # API endpoint for plugin configuration
    base_url = "http://localhost:5001"
    config_endpoint = f"{base_url}/console/api/workspaces/current/model-providers/langgenius/openai/openai/credentials"

    # Configuration payload with mock server
    config_payload = {
        "credentials": {
            "openai_api_key": "apikey",
            "openai_organization": None,
            "openai_api_base": "http://host.docker.internal:5004",
        }
    }

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the configuration request
        with httpx.Client() as client:
            response = client.post(
                config_endpoint,
                json=config_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200:
                log.success("OpenAI plugin configured successfully!")
                log.key_value("API Base", config_payload["credentials"]["openai_api_base"])
                log.key_value("API Key", config_payload["credentials"]["openai_api_key"])

            elif response.status_code == 201:
                log.success("OpenAI plugin credentials created successfully!")
                log.key_value("API Base", config_payload["credentials"]["openai_api_base"])
                log.key_value("API Key", config_payload["credentials"]["openai_api_key"])

            elif response.status_code == 401:
                log.error("Configuration failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            else:
                log.error(
                    f"Configuration failed with status code: {response.status_code}"
                )
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    configure_openai_plugin()
117
scripts/stress-test/setup/create_api_key.py
Executable file
@@ -0,0 +1,117 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
import json
from common import config_helper
from common import Logger


def create_api_key() -> None:
    """Create API key for the imported app."""

    log = Logger("CreateAPIKey")
    log.header("Creating API Key")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        return

    # Read app_id from config
    app_id = config_helper.get_app_id()
    if not app_id:
        log.error("No app_id found in config")
        log.info("Please run import_workflow_app.py first to import the app")
        return

    log.step(f"Creating API key for app: {app_id}")

    # API endpoint for creating API key
    base_url = "http://localhost:5001"
    api_key_endpoint = f"{base_url}/console/api/apps/{app_id}/api-keys"

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Length": "0",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the API key creation request
        with httpx.Client() as client:
            response = client.post(
                api_key_endpoint,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200 or response.status_code == 201:
                response_data = response.json()

                api_key_id = response_data.get("id")
                api_key_token = response_data.get("token")

                if api_key_token:
                    log.success("API key created successfully!")
                    log.key_value("Key ID", api_key_id)
                    log.key_value("Token", api_key_token)
                    log.key_value("Type", response_data.get("type"))

                    # Save API key to config
                    api_key_config = {
                        "id": api_key_id,
                        "token": api_key_token,
                        "type": response_data.get("type"),
                        "app_id": app_id,
                        "created_at": response_data.get("created_at"),
                    }

                    if config_helper.write_config("api_key_config", api_key_config):
                        log.info(
                            f"API key saved to: {config_helper.get_config_path('stress_test_state')}"
                        )
                else:
                    log.error("No API token received")
                    log.debug(f"Response: {json.dumps(response_data, indent=2)}")

            elif response.status_code == 401:
                log.error("API key creation failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            else:
                log.error(
                    f"API key creation failed with status code: {response.status_code}"
                )
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    create_api_key()
176
scripts/stress-test/setup/dsl/workflow_llm.yml
Normal file
@@ -0,0 +1,176 @@
app:
  description: ''
  icon: 🤖
  icon_background: '#FFEAD5'
  mode: workflow
  name: workflow_llm
  use_icon_as_answer_icon: false
dependencies:
- current_identifier: null
  type: marketplace
  value:
    marketplace_plugin_unique_identifier: langgenius/openai:0.2.5@373362a028986aae53a7baf73a7f11991ba3c22c69eaf97d6cde048cfd4a9f98
kind: app
version: 0.4.0
workflow:
  conversation_variables: []
  environment_variables: []
  features:
    file_upload:
      allowed_file_extensions:
      - .JPG
      - .JPEG
      - .PNG
      - .GIF
      - .WEBP
      - .SVG
      allowed_file_types:
      - image
      allowed_file_upload_methods:
      - local_file
      - remote_url
      enabled: false
      fileUploadConfig:
        audio_file_size_limit: 50
        batch_count_limit: 5
        file_size_limit: 15
        image_file_size_limit: 10
        video_file_size_limit: 100
        workflow_file_upload_limit: 10
      image:
        enabled: false
        number_limits: 3
        transfer_methods:
        - local_file
        - remote_url
      number_limits: 3
    opening_statement: ''
    retriever_resource:
      enabled: true
    sensitive_word_avoidance:
      enabled: false
    speech_to_text:
      enabled: false
    suggested_questions: []
    suggested_questions_after_answer:
      enabled: false
    text_to_speech:
      enabled: false
      language: ''
      voice: ''
  graph:
    edges:
    - data:
        isInIteration: false
        isInLoop: false
        sourceType: start
        targetType: llm
      id: 1757611990947-source-1757611992921-target
      source: '1757611990947'
      sourceHandle: source
      target: '1757611992921'
      targetHandle: target
      type: custom
      zIndex: 0
    - data:
        isInIteration: false
        isInLoop: false
        sourceType: llm
        targetType: end
      id: 1757611992921-source-1757611996447-target
      source: '1757611992921'
      sourceHandle: source
      target: '1757611996447'
      targetHandle: target
      type: custom
      zIndex: 0
    nodes:
    - data:
        desc: ''
        selected: false
        title: Start
        type: start
        variables:
        - label: question
          max_length: null
          options: []
          required: true
          type: text-input
          variable: question
      height: 90
      id: '1757611990947'
      position:
        x: 30
        y: 245
      positionAbsolute:
        x: 30
        y: 245
      selected: false
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 244
    - data:
        context:
          enabled: false
          variable_selector: []
        desc: ''
        model:
          completion_params:
            temperature: 0.7
          mode: chat
          name: gpt-4o
          provider: langgenius/openai/openai
        prompt_template:
        - id: c165fcb6-f1f0-42f2-abab-e81982434deb
          role: system
          text: ''
        - role: user
          text: '{{#1757611990947.question#}}'
        selected: false
        title: LLM
        type: llm
        variables: []
        vision:
          enabled: false
      height: 90
      id: '1757611992921'
      position:
        x: 334
        y: 245
      positionAbsolute:
        x: 334
        y: 245
      selected: false
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 244
    - data:
        desc: ''
        outputs:
        - value_selector:
          - '1757611992921'
          - text
          value_type: string
          variable: answer
        selected: false
        title: End
        type: end
      height: 90
      id: '1757611996447'
      position:
        x: 638
        y: 245
      positionAbsolute:
        x: 638
        y: 245
      selected: true
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 244
    viewport:
      x: 0
      y: 0
      zoom: 0.7
131
scripts/stress-test/setup/import_workflow_app.py
Executable file
@@ -0,0 +1,131 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
import json
from common import config_helper, Logger


def import_workflow_app() -> None:
    """Import workflow app from DSL file and save app_id."""

    log = Logger("ImportApp")
    log.header("Importing Workflow Application")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        log.info("Please run login_admin.py first to get access token")
        return

    # Read workflow DSL file
    dsl_path = Path(__file__).parent / "dsl" / "workflow_llm.yml"

    if not dsl_path.exists():
        log.error(f"DSL file not found: {dsl_path}")
        return

    with open(dsl_path, "r") as f:
        yaml_content = f.read()

    log.step("Importing workflow app from DSL...")
    log.key_value("DSL file", dsl_path.name)

    # API endpoint for app import
    base_url = "http://localhost:5001"
    import_endpoint = f"{base_url}/console/api/apps/imports"

    # Import payload
    import_payload = {"mode": "yaml-content", "yaml_content": yaml_content}

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the import request
        with httpx.Client() as client:
            response = client.post(
                import_endpoint,
                json=import_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200:
                response_data = response.json()

                # Check import status
                if response_data.get("status") == "completed":
                    app_id = response_data.get("app_id")

                    if app_id:
                        log.success("Workflow app imported successfully!")
                        log.key_value("App ID", app_id)
                        log.key_value("App Mode", response_data.get("app_mode"))
                        log.key_value(
                            "DSL Version", response_data.get("imported_dsl_version")
                        )

                        # Save app_id to config
                        app_config = {
                            "app_id": app_id,
                            "app_mode": response_data.get("app_mode"),
                            "app_name": "workflow_llm",
                            "dsl_version": response_data.get("imported_dsl_version"),
                        }

                        if config_helper.write_config("app_config", app_config):
                            log.info(
                                f"App config saved to: {config_helper.get_config_path('stress_test_state')}"
                            )
                    else:
                        log.error("Import completed but no app_id received")
                        log.debug(f"Response: {json.dumps(response_data, indent=2)}")

                elif response_data.get("status") == "failed":
                    log.error("Import failed")
                    log.error(f"Error: {response_data.get('error')}")
                else:
                    log.warning(f"Import status: {response_data.get('status')}")
                    log.debug(f"Response: {json.dumps(response_data, indent=2)}")

            elif response.status_code == 401:
                log.error("Import failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            else:
                log.error(f"Import failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    import_workflow_app()
165
scripts/stress-test/setup/install_openai_plugin.py
Executable file
@@ -0,0 +1,165 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
import time
from common import config_helper
from common import Logger


def install_openai_plugin() -> None:
    """Install OpenAI plugin using saved access token."""

    log = Logger("InstallPlugin")
    log.header("Installing OpenAI Plugin")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        log.info("Please run login_admin.py first to get access token")
        return

    log.step("Installing OpenAI plugin...")

    # API endpoint for plugin installation
    base_url = "http://localhost:5001"
    install_endpoint = (
        f"{base_url}/console/api/workspaces/current/plugin/install/marketplace"
    )

    # Plugin identifier
    plugin_payload = {
        "plugin_unique_identifiers": [
            "langgenius/openai:0.2.5@373362a028986aae53a7baf73a7f11991ba3c22c69eaf97d6cde048cfd4a9f98"
        ]
    }

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the installation request
        with httpx.Client() as client:
            response = client.post(
                install_endpoint,
                json=plugin_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200:
                response_data = response.json()
                task_id = response_data.get("task_id")

                if not task_id:
                    log.error("No task ID received from installation request")
                    return

                log.progress(f"Installation task created: {task_id}")
                log.info("Polling for task completion...")

                # Poll for task completion
                task_endpoint = (
                    f"{base_url}/console/api/workspaces/current/plugin/tasks/{task_id}"
                )

                max_attempts = 30  # 30 attempts with 2 second delay = 60 seconds max
                attempt = 0

                log.spinner_start("Installing plugin")

                while attempt < max_attempts:
                    attempt += 1
                    time.sleep(2)  # Wait 2 seconds between polls

                    task_response = client.get(
                        task_endpoint,
                        headers=headers,
                        cookies=cookies,
                    )

                    if task_response.status_code != 200:
                        log.spinner_stop(
                            success=False,
                            message=f"Failed to get task status: {task_response.status_code}",
                        )
                        return

                    task_data = task_response.json()
                    task_info = task_data.get("task", {})
                    status = task_info.get("status")

                    if status == "success":
                        log.spinner_stop(success=True, message="Plugin installed!")
                        log.success("OpenAI plugin installed successfully!")

                        # Display plugin info
                        plugins = task_info.get("plugins", [])
                        if plugins:
                            plugin_info = plugins[0]
                            log.key_value("Plugin ID", plugin_info.get("plugin_id"))
                            log.key_value("Message", plugin_info.get("message"))
                        break

                    elif status == "failed":
                        log.spinner_stop(success=False, message="Installation failed")
                        log.error("Plugin installation failed")
                        plugins = task_info.get("plugins", [])
                        if plugins:
                            for plugin in plugins:
                                log.list_item(
                                    f"{plugin.get('plugin_id')}: {plugin.get('message')}"
                                )
                        break

                    # Continue polling if status is "pending" or other

                else:
                    log.spinner_stop(success=False, message="Installation timed out")
                    log.error("Installation timed out after 60 seconds")

            elif response.status_code == 401:
                log.error("Installation failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            elif response.status_code == 409:
                log.warning("Plugin may already be installed")
                log.debug(f"Response: {response.text}")
            else:
                log.error(
                    f"Installation failed with status code: {response.status_code}"
                )
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    install_openai_plugin()
107
scripts/stress-test/setup/login_admin.py
Executable file
@@ -0,0 +1,107 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
import json
from common import config_helper
from common import Logger


def login_admin() -> None:
    """Login with admin account and save access token."""

    log = Logger("Login")
    log.header("Admin Login")

    # Read admin credentials from config
    admin_config = config_helper.read_config("admin_config")

    if not admin_config:
        log.error("Admin config not found")
        log.info("Please run setup_admin.py first to create the admin account")
        return

    log.info(f"Logging in with email: {admin_config['email']}")

    # API login endpoint
    base_url = "http://localhost:5001"
    login_endpoint = f"{base_url}/console/api/login"

    # Prepare login payload
    login_payload = {
        "email": admin_config["email"],
        "password": admin_config["password"],
        "remember_me": True,
    }

    try:
        # Make the login request
        with httpx.Client() as client:
            response = client.post(
                login_endpoint,
                json=login_payload,
                headers={"Content-Type": "application/json"},
            )

            if response.status_code == 200:
                log.success("Login successful!")

                # Extract token from response
                response_data = response.json()

                # Check if login was successful
                if response_data.get("result") != "success":
                    log.error(f"Login failed: {response_data}")
                    return

                # Extract tokens from data field
                token_data = response_data.get("data", {})
                access_token = token_data.get("access_token", "")
                refresh_token = token_data.get("refresh_token", "")

                if not access_token:
                    log.error("No access token found in response")
                    log.debug(f"Full response: {json.dumps(response_data, indent=2)}")
                    return

                # Save token to config file
                token_config = {
                    "email": admin_config["email"],
                    "access_token": access_token,
                    "refresh_token": refresh_token,
                }

                # Save token config
                if config_helper.write_config("token_config", token_config):
                    log.info(
                        f"Token saved to: {config_helper.get_config_path('benchmark_state')}"
                    )

                # Show truncated token for verification
                token_display = (
                    f"{access_token[:20]}..."
                    if len(access_token) > 20
                    else "Token saved"
                )
                log.key_value("Access token", token_display)

            elif response.status_code == 401:
                log.error("Login failed: Invalid credentials")
                log.debug(f"Response: {response.text}")
            else:
                log.error(f"Login failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    login_admin()
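Every subsequent script reads this token back through the same helper; a minimal sketch of the consumer side (assuming it runs from scripts/stress-test/ so that common is importable):

from common import config_helper  # same helper the setup scripts use

token_config = config_helper.read_config("token_config")
access_token = token_config["access_token"] if token_config else None
headers = {"authorization": f"Bearer {access_token}"}  # header shape used by the console API calls above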
203
scripts/stress-test/setup/mock_openai_server.py
Executable file
@@ -0,0 +1,203 @@
#!/usr/bin/env python3

import json
import time
import uuid
from typing import Any, Iterator
from flask import Flask, request, jsonify, Response

app = Flask(__name__)

# Mock models list
MODELS = [
    {
        "id": "gpt-3.5-turbo",
        "object": "model",
        "created": 1677649963,
        "owned_by": "openai",
    },
    {"id": "gpt-4", "object": "model", "created": 1687882411, "owned_by": "openai"},
    {
        "id": "text-embedding-ada-002",
        "object": "model",
        "created": 1671217299,
        "owned_by": "openai-internal",
    },
]


@app.route("/v1/models", methods=["GET"])
def list_models() -> Any:
    """List available models."""
    return jsonify({"object": "list", "data": MODELS})


@app.route("/v1/chat/completions", methods=["POST"])
def chat_completions() -> Any:
    """Handle chat completions."""
    data = request.json or {}
    model = data.get("model", "gpt-3.5-turbo")
    messages = data.get("messages", [])
    stream = data.get("stream", False)

    # Generate mock response
    response_content = "This is a mock response from the OpenAI server."
    if messages:
        last_message = messages[-1].get("content", "")
        response_content = f"Mock response to: {last_message[:100]}..."

    if stream:
        # Streaming response
        def generate() -> Iterator[str]:
            # Send initial chunk
            chunk = {
                "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                "object": "chat.completion.chunk",
                "created": int(time.time()),
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "delta": {"role": "assistant", "content": ""},
                        "finish_reason": None,
                    }
                ],
            }
            yield f"data: {json.dumps(chunk)}\n\n"

            # Send content in chunks
            words = response_content.split()
            for word in words:
                chunk = {
                    "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "model": model,
                    "choices": [
                        {
                            "index": 0,
                            "delta": {"content": word + " "},
                            "finish_reason": None,
                        }
                    ],
                }
                yield f"data: {json.dumps(chunk)}\n\n"
                time.sleep(0.05)  # Simulate streaming delay

            # Send final chunk
            chunk = {
                "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                "object": "chat.completion.chunk",
                "created": int(time.time()),
                "model": model,
                "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
            }
            yield f"data: {json.dumps(chunk)}\n\n"
            yield "data: [DONE]\n\n"

        return Response(generate(), mimetype="text/event-stream")
    else:
        # Non-streaming response
        return jsonify(
            {
                "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                "object": "chat.completion",
                "created": int(time.time()),
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "message": {"role": "assistant", "content": response_content},
                        "finish_reason": "stop",
                    }
                ],
                "usage": {
                    "prompt_tokens": len(str(messages)),
                    "completion_tokens": len(response_content.split()),
                    "total_tokens": len(str(messages)) + len(response_content.split()),
                },
            }
        )


@app.route("/v1/completions", methods=["POST"])
def completions() -> Any:
    """Handle text completions."""
    data = request.json or {}
    model = data.get("model", "gpt-3.5-turbo-instruct")
    prompt = data.get("prompt", "")

    response_text = f"Mock completion for prompt: {prompt[:100]}..."

    return jsonify(
        {
            "id": f"cmpl-{uuid.uuid4().hex[:8]}",
            "object": "text_completion",
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "text": response_text,
                    "index": 0,
                    "logprobs": None,
                    "finish_reason": "stop",
                }
            ],
            "usage": {
                "prompt_tokens": len(prompt.split()),
                "completion_tokens": len(response_text.split()),
                "total_tokens": len(prompt.split()) + len(response_text.split()),
            },
        }
    )


@app.route("/v1/embeddings", methods=["POST"])
def embeddings() -> Any:
    """Handle embeddings requests."""
    data = request.json or {}
    model = data.get("model", "text-embedding-ada-002")
    input_text = data.get("input", "")

    # Generate mock embedding (1536 dimensions for ada-002)
    mock_embedding = [0.1] * 1536

    return jsonify(
        {
            "object": "list",
            "data": [{"object": "embedding", "embedding": mock_embedding, "index": 0}],
            "model": model,
            "usage": {
                "prompt_tokens": len(input_text.split()),
                "total_tokens": len(input_text.split()),
            },
        }
    )


@app.route("/v1/models/<model_id>", methods=["GET"])
def get_model(model_id: str) -> tuple[Any, int] | Any:
    """Get specific model details."""
    for model in MODELS:
        if model["id"] == model_id:
            return jsonify(model)

    return jsonify({"error": "Model not found"}), 404


@app.route("/health", methods=["GET"])
def health() -> Any:
    """Health check endpoint."""
    return jsonify({"status": "healthy"})


if __name__ == "__main__":
    print("🚀 Starting Mock OpenAI Server on http://localhost:5004")
    print("Available endpoints:")
    print(" - GET /v1/models")
    print(" - POST /v1/chat/completions")
    print(" - POST /v1/completions")
    print(" - POST /v1/embeddings")
    print(" - GET /v1/models/<model_id>")
    print(" - GET /health")
    app.run(host="0.0.0.0", port=5004, debug=True)
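The mock server can be smoke-tested on its own before wiring it into the plugin config; a short sketch against the endpoints defined above, assuming the server is already listening on port 5004:

import httpx

base = "http://localhost:5004"
print(httpx.get(f"{base}/health").json())  # {'status': 'healthy'}

resp = httpx.post(
    f"{base}/v1/chat/completions",
    json={"model": "gpt-4o", "messages": [{"role": "user", "content": "ping"}]},
)
print(resp.json()["choices"][0]["message"]["content"])  # "Mock response to: ping..."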
109
scripts/stress-test/setup/publish_workflow.py
Executable file
@@ -0,0 +1,109 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
import json
from common import config_helper
from common import Logger


def publish_workflow() -> None:
    """Publish the imported workflow app."""

    log = Logger("PublishWorkflow")
    log.header("Publishing Workflow")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        return

    # Read app_id from config
    app_id = config_helper.get_app_id()
    if not app_id:
        log.error("No app_id found in config")
        return

    log.step(f"Publishing workflow for app: {app_id}")

    # API endpoint for publishing workflow
    base_url = "http://localhost:5001"
    publish_endpoint = f"{base_url}/console/api/apps/{app_id}/workflows/publish"

    # Publish payload
    publish_payload = {"marked_name": "", "marked_comment": ""}

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the publish request
        with httpx.Client() as client:
            response = client.post(
                publish_endpoint,
                json=publish_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200 or response.status_code == 201:
                log.success("Workflow published successfully!")
                log.key_value("App ID", app_id)

                # Try to parse response if it has JSON content
                if response.text:
                    try:
                        response_data = response.json()
                        if response_data:
                            log.debug(
                                f"Response: {json.dumps(response_data, indent=2)}"
                            )
                    except json.JSONDecodeError:
                        # Response might be empty or non-JSON
                        pass

            elif response.status_code == 401:
                log.error("Workflow publish failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            elif response.status_code == 404:
                log.error("Workflow publish failed: App not found")
                log.info("Make sure the app was imported successfully")
            else:
                log.error(
                    f"Workflow publish failed with status code: {response.status_code}"
                )
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    publish_workflow()
166
scripts/stress-test/setup/run_workflow.py
Executable file
@@ -0,0 +1,166 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
import json
from common import config_helper, Logger


def run_workflow(question: str = "fake question", streaming: bool = True) -> None:
    """Run the workflow app with a question."""

    log = Logger("RunWorkflow")
    log.header("Running Workflow")

    # Read API key from config
    api_token = config_helper.get_api_key()
    if not api_token:
        log.error("No API token found in config")
        log.info("Please run create_api_key.py first to create an API key")
        return

    log.key_value("Question", question)
    log.key_value("Mode", "Streaming" if streaming else "Blocking")
    log.separator()

    # API endpoint for running workflow
    base_url = "http://localhost:5001"
    run_endpoint = f"{base_url}/v1/workflows/run"

    # Run payload
    run_payload = {
        "inputs": {"question": question},
        "user": "default user",
        "response_mode": "streaming" if streaming else "blocking",
    }

    headers = {
        "Authorization": f"Bearer {api_token}",
        "Content-Type": "application/json",
    }

    try:
        # Make the run request
        with httpx.Client(timeout=30.0) as client:
            if streaming:
                # Handle streaming response
                with client.stream(
                    "POST",
                    run_endpoint,
                    json=run_payload,
                    headers=headers,
                ) as response:
                    if response.status_code == 200:
                        log.success("Workflow started successfully!")
                        log.separator()
                        log.step("Streaming response:")

                        for line in response.iter_lines():
                            if line.startswith("data: "):
                                data_str = line[6:]  # Remove "data: " prefix
                                if data_str == "[DONE]":
                                    log.success("Workflow completed!")
                                    break
                                try:
                                    data = json.loads(data_str)
                                    event = data.get("event")

                                    if event == "workflow_started":
                                        log.progress(
                                            f"Workflow started: {data.get('data', {}).get('id')}"
                                        )
                                    elif event == "node_started":
                                        node_data = data.get("data", {})
                                        log.progress(
                                            f"Node started: {node_data.get('node_type')} - {node_data.get('title')}"
                                        )
                                    elif event == "node_finished":
                                        node_data = data.get("data", {})
                                        log.progress(
                                            f"Node finished: {node_data.get('node_type')} - {node_data.get('title')}"
                                        )

                                        # Print output if it's the LLM node
                                        outputs = node_data.get("outputs", {})
                                        if outputs.get("text"):
                                            log.separator()
                                            log.info("💬 LLM Response:")
                                            log.info(outputs.get("text"), indent=2)
                                            log.separator()

                                    elif event == "workflow_finished":
                                        workflow_data = data.get("data", {})
                                        outputs = workflow_data.get("outputs", {})
                                        if outputs.get("answer"):
                                            log.separator()
                                            log.info("📤 Final Answer:")
                                            log.info(outputs.get("answer"), indent=2)
                                        log.separator()
                                        log.key_value(
                                            "Total tokens",
                                            str(workflow_data.get("total_tokens", 0)),
                                        )
                                        log.key_value(
                                            "Total steps",
                                            str(workflow_data.get("total_steps", 0)),
                                        )

                                    elif event == "error":
                                        log.error(f"Error: {data.get('message')}")

                                except json.JSONDecodeError:
                                    # Some lines might not be JSON
                                    pass
                    else:
                        log.error(
                            f"Workflow run failed with status code: {response.status_code}"
                        )
                        log.debug(f"Response: {response.text}")
            else:
                # Handle blocking response
                response = client.post(
                    run_endpoint,
                    json=run_payload,
                    headers=headers,
                )

                if response.status_code == 200:
                    log.success("Workflow completed successfully!")
                    response_data = response.json()

                    log.separator()
                    log.debug(f"Full response: {json.dumps(response_data, indent=2)}")

                    # Extract the answer if available
                    outputs = response_data.get("data", {}).get("outputs", {})
                    if outputs.get("answer"):
                        log.separator()
                        log.info("📤 Final Answer:")
                        log.info(outputs.get("answer"), indent=2)
                else:
                    log.error(
                        f"Workflow run failed with status code: {response.status_code}"
                    )
                    log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except httpx.TimeoutException:
        log.error("Request timed out")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    # Allow passing question as command line argument
    if len(sys.argv) > 1:
        question = " ".join(sys.argv[1:])
    else:
        question = "What is the capital of France?"

    run_workflow(question=question, streaming=True)
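The script doubles as a library: run_workflow() is importable, and blocking mode fetches a single JSON body instead of consuming an SSE stream. A minimal sketch, assuming the caller adds scripts/stress-test/setup to sys.path:

import sys
sys.path.append("scripts/stress-test/setup")  # assumption: repo root as cwd

from run_workflow import run_workflow

run_workflow(question="What is the capital of France?", streaming=False)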
75
scripts/stress-test/setup/setup_admin.py
Executable file
@@ -0,0 +1,75 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
from common import config_helper, Logger


def setup_admin_account() -> None:
    """Setup Dify API with an admin account."""

    log = Logger("SetupAdmin")
    log.header("Setting up Admin Account")

    # Admin account credentials
    admin_config = {
        "email": "test@dify.ai",
        "username": "dify",
        "password": "password123",
    }

    # Save credentials to config file
    if config_helper.write_config("admin_config", admin_config):
        log.info(
            f"Admin credentials saved to: {config_helper.get_config_path('benchmark_state')}"
        )

    # API setup endpoint
    base_url = "http://localhost:5001"
    setup_endpoint = f"{base_url}/console/api/setup"

    # Prepare setup payload
    setup_payload = {
        "email": admin_config["email"],
        "name": admin_config["username"],
        "password": admin_config["password"],
    }

    log.step("Configuring Dify with admin account...")

    try:
        # Make the setup request
        with httpx.Client() as client:
            response = client.post(
                setup_endpoint,
                json=setup_payload,
                headers={"Content-Type": "application/json"},
            )

            if response.status_code == 201:
                log.success("Admin account created successfully!")
                log.key_value("Email", admin_config["email"])
                log.key_value("Username", admin_config["username"])

            elif response.status_code == 400:
                log.warning(
                    "Setup may have already been completed or invalid data provided"
                )
                log.debug(f"Response: {response.text}")
            else:
                log.error(f"Setup failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    setup_admin_account()
164
scripts/stress-test/setup_all.py
Executable file
@@ -0,0 +1,164 @@
#!/usr/bin/env python3

import subprocess
import sys
import time
import socket
from pathlib import Path

from common import Logger, ProgressLogger


def run_script(script_name: str, description: str) -> bool:
    """Run a Python script and return success status."""
    script_path = Path(__file__).parent / "setup" / script_name

    if not script_path.exists():
        print(f"❌ Script not found: {script_path}")
        return False

    print(f"\n{'=' * 60}")
    print(f"🚀 {description}")
    print(f" Running: {script_name}")
    print(f"{'=' * 60}")

    try:
        result = subprocess.run(
            [sys.executable, str(script_path)],
            capture_output=True,
            text=True,
            check=False,
        )

        # Print output
        if result.stdout:
            print(result.stdout)
        if result.stderr:
            print(result.stderr, file=sys.stderr)

        if result.returncode != 0:
            print(f"❌ Script failed with exit code: {result.returncode}")
            return False

        print(f"✅ {script_name} completed successfully")
        return True

    except Exception as e:
        print(f"❌ Error running {script_name}: {e}")
        return False


def check_port(host: str, port: int, service_name: str) -> bool:
    """Check if a service is running on the specified port."""
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2)
        result = sock.connect_ex((host, port))
        sock.close()

        if result == 0:
            Logger().success(f"{service_name} is running on port {port}")
            return True
        else:
            Logger().error(f"{service_name} is not accessible on port {port}")
            return False
    except Exception as e:
        Logger().error(f"Error checking {service_name}: {e}")
        return False


def main() -> None:
    """Run all setup scripts in order."""

    log = Logger("Setup")
    log.box("Dify Stress Test Setup - Full Installation")

    # Check if required services are running
    log.step("Checking required services...")
    log.separator()

    dify_running = check_port("localhost", 5001, "Dify API server")
    if not dify_running:
        log.info("To start Dify API server:")
        log.list_item("Run: ./dev/start-api")

    mock_running = check_port("localhost", 5004, "Mock OpenAI server")
    if not mock_running:
        log.info("To start Mock OpenAI server:")
        log.list_item("Run: python scripts/stress-test/setup/mock_openai_server.py")

    if not dify_running or not mock_running:
        print("\n⚠️ Both services must be running before proceeding.")
        retry = input("\nWould you like to check again? (yes/no): ")
        if retry.lower() in ["yes", "y"]:
            return main()  # Recursively call main to check again
        else:
            print(
                "❌ Setup cancelled. Please start the required services and try again."
            )
            sys.exit(1)

    log.success("All required services are running!")
    input("\nPress Enter to continue with setup...")

    # Define setup steps
    setup_steps = [
        ("setup_admin.py", "Creating admin account"),
        ("login_admin.py", "Logging in and getting access token"),
        ("install_openai_plugin.py", "Installing OpenAI plugin"),
        ("configure_openai_plugin.py", "Configuring OpenAI plugin with mock server"),
        ("import_workflow_app.py", "Importing workflow application"),
        ("create_api_key.py", "Creating API key for the app"),
        ("publish_workflow.py", "Publishing the workflow"),
    ]

    # Create progress logger
    progress = ProgressLogger(len(setup_steps), log)
    failed_step = None

    for script, description in setup_steps:
        progress.next_step(description)
        success = run_script(script, description)

        if not success:
            failed_step = script
            break

        # Small delay between steps
        time.sleep(1)

    log.separator()

    if failed_step:
        log.error(f"Setup failed at: {failed_step}")
        log.separator()
        log.info("Troubleshooting:")
        log.list_item("Check if the Dify API server is running (./dev/start-api)")
        log.list_item("Check if the mock OpenAI server is running (port 5004)")
        log.list_item("Review the error messages above")
        log.list_item("Run cleanup.py and try again")
        sys.exit(1)
    else:
        progress.complete()
        log.separator()
        log.success("Setup completed successfully!")
        log.info("Next steps:")
        log.list_item("Test the workflow:")
        log.info(
            ' python scripts/stress-test/setup/run_workflow.py "Your question here"',
            indent=4,
        )
        log.list_item("To clean up and start over:")
        log.info(" python scripts/stress-test/cleanup.py", indent=4)

    # Optionally run a test
    log.separator()
    test_input = input("Would you like to run a test workflow now? (yes/no): ")

    if test_input.lower() in ["yes", "y"]:
        log.step("Running test workflow...")
        run_script("run_workflow.py", "Testing workflow with default question")


if __name__ == "__main__":
    main()
770
scripts/stress-test/sse_benchmark.py
Normal file
@@ -0,0 +1,770 @@
#!/usr/bin/env python3
"""
SSE (Server-Sent Events) Stress Test for Dify Workflow API

This script stress tests the streaming performance of Dify's workflow execution API,
measuring key metrics like connection rate, event throughput, and time to first event (TTFE).
"""

import json
import time
import random
import sys
import threading
import os
import logging
import statistics
from pathlib import Path
from collections import deque
from datetime import datetime
from dataclasses import dataclass, asdict
from locust import HttpUser, task, between, events, constant
from typing import TypedDict, Literal, TypeAlias
import requests.exceptions

# Add the stress-test directory to path to import common modules
sys.path.insert(0, str(Path(__file__).parent))
from common.config_helper import ConfigHelper  # type: ignore[import-not-found]

# Configure logging
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Configuration from environment
WORKFLOW_PATH = os.getenv("WORKFLOW_PATH", "/v1/workflows/run")
CONNECT_TIMEOUT = float(os.getenv("CONNECT_TIMEOUT", "10"))
READ_TIMEOUT = float(os.getenv("READ_TIMEOUT", "60"))
TERMINAL_EVENTS = [e.strip() for e in os.getenv("TERMINAL_EVENTS", "workflow_finished,error").split(",") if e.strip()]
QUESTIONS_FILE = os.getenv("QUESTIONS_FILE", "")


# Type definitions
ErrorType: TypeAlias = Literal[
    "connection_error",
    "timeout",
    "invalid_json",
    "http_4xx",
    "http_5xx",
    "early_termination",
    "invalid_response",
]


class ErrorCounts(TypedDict):
    """Error count tracking"""
    connection_error: int
    timeout: int
    invalid_json: int
    http_4xx: int
    http_5xx: int
    early_termination: int
    invalid_response: int


class SSEEvent(TypedDict):
    """Server-Sent Event structure"""
    data: str
    event: str
    id: str | None


class WorkflowInputs(TypedDict):
    """Workflow input structure"""
    question: str


class WorkflowRequestData(TypedDict):
    """Workflow request payload"""
    inputs: WorkflowInputs
    response_mode: Literal["streaming"]
    user: str


class ParsedEventData(TypedDict, total=False):
    """Parsed event data from SSE stream"""
    event: str
    task_id: str
    workflow_run_id: str
    data: object  # For dynamic content
    created_at: int


class LocustStats(TypedDict):
    """Locust statistics structure"""
    total_requests: int
    total_failures: int
    avg_response_time: float
    min_response_time: float
    max_response_time: float


class ReportData(TypedDict):
    """JSON report structure"""
    timestamp: str
    duration_seconds: float
    metrics: dict[str, object]  # Metrics as dict for JSON serialization
    locust_stats: LocustStats | None


@dataclass
class StreamMetrics:
    """Metrics for a single stream"""

    stream_duration: float
    events_count: int
    bytes_received: int
    ttfe: float
    inter_event_times: list[float]


@dataclass
class MetricsSnapshot:
    """Snapshot of current metrics state"""

    active_connections: int
    total_connections: int
    total_events: int
    connection_rate: float
    event_rate: float
    overall_conn_rate: float
    overall_event_rate: float
    ttfe_avg: float
    ttfe_min: float
    ttfe_max: float
    ttfe_p50: float
    ttfe_p95: float
    ttfe_samples: int
    ttfe_total_samples: int  # Total TTFE samples collected (not limited by window)
    error_counts: ErrorCounts
    stream_duration_avg: float
    stream_duration_p50: float
    stream_duration_p95: float
    events_per_stream_avg: float
    inter_event_latency_avg: float
    inter_event_latency_p50: float
    inter_event_latency_p95: float


class MetricsTracker:
    def __init__(self) -> None:
        self.lock = threading.Lock()
        self.active_connections = 0
        self.total_connections = 0
        self.total_events = 0
        self.start_time = time.time()

        # Enhanced metrics with memory limits
        self.max_samples = 10000  # Prevent unbounded growth
        self.ttfe_samples: deque[float] = deque(maxlen=self.max_samples)
        self.ttfe_total_count = 0  # Track total TTFE samples collected

        # For rate calculations - no maxlen to avoid artificial limits
        self.connection_times: deque[float] = deque()
        self.event_times: deque[float] = deque()
        self.last_stats_time = time.time()
        self.last_total_connections = 0
        self.last_total_events = 0
        self.stream_metrics: deque[StreamMetrics] = deque(maxlen=self.max_samples)
        self.error_counts: ErrorCounts = ErrorCounts(
            connection_error=0,
            timeout=0,
            invalid_json=0,
            http_4xx=0,
            http_5xx=0,
            early_termination=0,
            invalid_response=0,
        )

    def connection_started(self) -> None:
        with self.lock:
            self.active_connections += 1
            self.total_connections += 1
            self.connection_times.append(time.time())

    def connection_ended(self) -> None:
        with self.lock:
            self.active_connections -= 1

    def event_received(self) -> None:
        with self.lock:
            self.total_events += 1
            self.event_times.append(time.time())

    def record_ttfe(self, ttfe_ms: float) -> None:
        with self.lock:
            self.ttfe_samples.append(ttfe_ms)  # deque handles maxlen
            self.ttfe_total_count += 1  # Increment total counter

    def record_stream_metrics(self, metrics: StreamMetrics) -> None:
        with self.lock:
            self.stream_metrics.append(metrics)  # deque handles maxlen

    def record_error(self, error_type: ErrorType) -> None:
        with self.lock:
            self.error_counts[error_type] += 1

    def get_stats(self) -> MetricsSnapshot:
        with self.lock:
            current_time = time.time()
            time_window = 10.0  # 10 second window for rate calculation

            # Clean up old timestamps outside the window
            cutoff_time = current_time - time_window
            while self.connection_times and self.connection_times[0] < cutoff_time:
                self.connection_times.popleft()
            while self.event_times and self.event_times[0] < cutoff_time:
                self.event_times.popleft()

            # Calculate rates based on actual window or elapsed time
            window_duration = min(time_window, current_time - self.start_time)
            if window_duration > 0:
                conn_rate = len(self.connection_times) / window_duration
                event_rate = len(self.event_times) / window_duration
            else:
                conn_rate = 0
                event_rate = 0

            # Calculate TTFE statistics
            if self.ttfe_samples:
                avg_ttfe = statistics.mean(self.ttfe_samples)
                min_ttfe = min(self.ttfe_samples)
                max_ttfe = max(self.ttfe_samples)
                p50_ttfe = statistics.median(self.ttfe_samples)
                if len(self.ttfe_samples) >= 2:
                    quantiles = statistics.quantiles(
                        self.ttfe_samples, n=20, method="inclusive"
                    )
                    p95_ttfe = quantiles[18]  # 19th of 19 quantiles = 95th percentile
                else:
                    p95_ttfe = max_ttfe
            else:
                avg_ttfe = min_ttfe = max_ttfe = p50_ttfe = p95_ttfe = 0

            # Calculate stream metrics
            if self.stream_metrics:
                durations = [m.stream_duration for m in self.stream_metrics]
                events_per_stream = [m.events_count for m in self.stream_metrics]
                stream_duration_avg = statistics.mean(durations)
                stream_duration_p50 = statistics.median(durations)
                stream_duration_p95 = (
                    statistics.quantiles(durations, n=20, method="inclusive")[18]
                    if len(durations) >= 2
                    else max(durations)
                    if durations
                    else 0
                )
                events_per_stream_avg = (
                    statistics.mean(events_per_stream) if events_per_stream else 0
                )

                # Calculate inter-event latency statistics
                all_inter_event_times = []
                for m in self.stream_metrics:
                    all_inter_event_times.extend(m.inter_event_times)

                if all_inter_event_times:
                    inter_event_latency_avg = statistics.mean(all_inter_event_times)
                    inter_event_latency_p50 = statistics.median(all_inter_event_times)
                    inter_event_latency_p95 = (
                        statistics.quantiles(
                            all_inter_event_times, n=20, method="inclusive"
                        )[18]
                        if len(all_inter_event_times) >= 2
                        else max(all_inter_event_times)
                    )
                else:
                    inter_event_latency_avg = inter_event_latency_p50 = inter_event_latency_p95 = 0
            else:
                stream_duration_avg = stream_duration_p50 = stream_duration_p95 = events_per_stream_avg = 0
                inter_event_latency_avg = inter_event_latency_p50 = inter_event_latency_p95 = 0

            # Also calculate overall average rates
            total_elapsed = current_time - self.start_time
            overall_conn_rate = (
                self.total_connections / total_elapsed if total_elapsed > 0 else 0
            )
            overall_event_rate = (
                self.total_events / total_elapsed if total_elapsed > 0 else 0
            )

            return MetricsSnapshot(
                active_connections=self.active_connections,
                total_connections=self.total_connections,
                total_events=self.total_events,
                connection_rate=conn_rate,
                event_rate=event_rate,
                overall_conn_rate=overall_conn_rate,
                overall_event_rate=overall_event_rate,
                ttfe_avg=avg_ttfe,
                ttfe_min=min_ttfe,
                ttfe_max=max_ttfe,
                ttfe_p50=p50_ttfe,
                ttfe_p95=p95_ttfe,
                ttfe_samples=len(self.ttfe_samples),
                ttfe_total_samples=self.ttfe_total_count,  # Return total count
                error_counts=ErrorCounts(**self.error_counts),
                stream_duration_avg=stream_duration_avg,
                stream_duration_p50=stream_duration_p50,
                stream_duration_p95=stream_duration_p95,
                events_per_stream_avg=events_per_stream_avg,
                inter_event_latency_avg=inter_event_latency_avg,
                inter_event_latency_p50=inter_event_latency_p50,
                inter_event_latency_p95=inter_event_latency_p95,
            )
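
# Percentile aside (illustrative, not part of the original file): with n=20,
# statistics.quantiles() returns 19 cut points, so index 18 is the 95th percentile.
#
#   >>> import statistics
#   >>> cuts = statistics.quantiles(range(1, 101), n=20, method="inclusive")
#   >>> len(cuts), round(cuts[18], 2)
#   (19, 95.05)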

# Global metrics instance
metrics = MetricsTracker()


class SSEParser:
    """Parser for Server-Sent Events according to W3C spec"""

    def __init__(self) -> None:
        self.data_buffer: list[str] = []
        self.event_type: str | None = None
        self.event_id: str | None = None

    def parse_line(self, line: str) -> SSEEvent | None:
        """Parse a single SSE line and return event if complete"""
        # Empty line signals end of event
        if not line:
            if self.data_buffer:
                event = SSEEvent(
                    data="\n".join(self.data_buffer),
                    event=self.event_type or "message",
                    id=self.event_id,
                )
                self.data_buffer = []
                self.event_type = None
                self.event_id = None
                return event
            return None

        # Comment line
        if line.startswith(":"):
            return None

        # Parse field
        if ":" in line:
            field, value = line.split(":", 1)
            value = value.lstrip()

            if field == "data":
                self.data_buffer.append(value)
            elif field == "event":
                self.event_type = value
            elif field == "id":
                self.event_id = value

        return None
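
# Usage sketch (illustrative, not part of the original file): feed decoded lines
# one at a time; the blank separator line completes an event.
#
#   parser = SSEParser()
#   for line in ('event: message', 'data: {"event": "ping"}', ''):
#       if (evt := parser.parse_line(line)) is not None:
#           print(evt["event"], evt["data"])   # -> message {"event": "ping"}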
|
||||
|
||||
# Note: SSEClient removed - we'll handle SSE parsing directly in the task for better Locust integration
|
||||
|
||||
|
||||
class DifyWorkflowUser(HttpUser):
|
||||
"""Locust user for testing Dify workflow SSE endpoints"""
|
||||
|
||||
# Use constant wait for streaming workloads
|
||||
wait_time = constant(0) if os.getenv("WAIT_TIME", "0") == "0" else between(1, 3)
|
||||
|
||||
def __init__(self, *args: object, **kwargs: object) -> None:
|
||||
super().__init__(*args, **kwargs) # type: ignore[arg-type]
|
||||
|
||||
# Load API configuration
|
||||
config_helper = ConfigHelper()
|
||||
self.api_token = config_helper.get_api_key()
|
||||
|
||||
if not self.api_token:
|
||||
raise ValueError("API key not found. Please run setup_all.py first.")
|
||||
|
||||
# Load questions from file or use defaults
|
||||
if QUESTIONS_FILE and os.path.exists(QUESTIONS_FILE):
|
||||
with open(QUESTIONS_FILE, "r") as f:
|
||||
self.questions = [line.strip() for line in f if line.strip()]
|
||||
else:
|
||||
self.questions = [
|
||||
"What is artificial intelligence?",
|
||||
"Explain quantum computing",
|
||||
"What is machine learning?",
|
||||
"How do neural networks work?",
|
||||
"What is renewable energy?",
|
||||
]
|
||||
|
||||
self.user_counter = 0
|
||||
|
||||
def on_start(self) -> None:
|
||||
"""Called when a user starts"""
|
||||
self.user_counter = 0
|
||||
|
||||
@task
|
||||
def test_workflow_stream(self) -> None:
|
||||
"""Test workflow SSE streaming endpoint"""
|
||||
|
||||
question = random.choice(self.questions)
|
||||
self.user_counter += 1
|
||||
|
||||
headers = {
|
||||
"Authorization": f"Bearer {self.api_token}",
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "text/event-stream",
|
||||
"Cache-Control": "no-cache",
|
||||
}
|
||||
|
||||
data = WorkflowRequestData(
|
||||
inputs=WorkflowInputs(question=question),
|
||||
response_mode="streaming",
|
||||
user=f"user_{self.user_counter}",
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
first_event_time = None
|
||||
event_count = 0
|
||||
inter_event_times: list[float] = []
|
||||
last_event_time = None
|
||||
ttfe = 0
|
||||
request_success = False
|
||||
bytes_received = 0
|
||||
|
||||
metrics.connection_started()
|
||||
|
||||
# Use catch_response context manager directly
|
||||
with self.client.request(
|
||||
method="POST",
|
||||
url=WORKFLOW_PATH,
|
||||
headers=headers,
|
||||
json=data,
|
||||
stream=True,
|
||||
catch_response=True,
|
||||
timeout=(CONNECT_TIMEOUT, READ_TIMEOUT),
|
||||
name="/v1/workflows/run", # Name for Locust stats
|
||||
) as response:
|
||||
try:
|
||||
# Validate response
|
||||
if response.status_code >= 400:
|
||||
error_type: ErrorType = (
|
||||
"http_4xx" if response.status_code < 500 else "http_5xx"
|
||||
)
|
||||
metrics.record_error(error_type)
|
||||
response.failure(f"HTTP {response.status_code}")
|
||||
return
|
||||
|
||||
content_type = response.headers.get("Content-Type", "")
|
||||
if (
|
||||
"text/event-stream" not in content_type
|
||||
and "application/json" not in content_type
|
||||
):
|
||||
logger.error(f"Expected text/event-stream, got: {content_type}")
|
||||
metrics.record_error("invalid_response")
|
||||
response.failure(f"Invalid content type: {content_type}")
|
||||
return
|
||||
|
||||
# Parse SSE events
|
||||
parser = SSEParser()
|
||||
|
||||
for line in response.iter_lines(decode_unicode=True):
|
||||
# Check if runner is stopping
|
||||
if getattr(self.environment.runner, 'state', '') in ('stopping', 'stopped'):
|
||||
logger.debug("Runner stopping, breaking streaming loop")
|
||||
break
|
||||
|
||||
if line is not None:
|
||||
bytes_received += len(line.encode("utf-8"))
|
||||
|
||||
# Parse SSE line
|
||||
event = parser.parse_line(line if line is not None else "")
|
||||
if event:
|
||||
event_count += 1
|
||||
current_time = time.time()
|
||||
metrics.event_received()
|
||||
|
||||
# Track inter-event timing
|
||||
if last_event_time:
|
||||
inter_event_times.append(
|
||||
(current_time - last_event_time) * 1000
|
||||
)
|
||||
last_event_time = current_time
|
||||
|
||||
if first_event_time is None:
|
||||
first_event_time = current_time
|
||||
ttfe = (first_event_time - start_time) * 1000
|
||||
metrics.record_ttfe(ttfe)
|
||||
|
||||
try:
|
||||
# Parse event data
|
||||
event_data = event.get("data", "")
|
||||
if event_data:
|
||||
if event_data == "[DONE]":
|
||||
logger.debug("Received [DONE] sentinel")
|
||||
request_success = True
|
||||
break
|
||||
|
||||
try:
|
||||
parsed_event: ParsedEventData = json.loads(event_data)
|
||||
# Check for terminal events
|
||||
if parsed_event.get("event") in TERMINAL_EVENTS:
|
||||
logger.debug(
|
||||
f"Received terminal event: {parsed_event.get('event')}"
|
||||
)
|
||||
request_success = True
|
||||
break
|
||||
except json.JSONDecodeError as e:
|
||||
logger.debug(
|
||||
f"JSON decode error: {e} for data: {event_data[:100]}"
|
||||
)
|
||||
metrics.record_error("invalid_json")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing event: {e}")
|
||||
|
||||
# Mark success only if terminal condition was met or events were received
|
||||
if request_success:
|
||||
response.success()
|
||||
elif event_count > 0:
|
||||
# Got events but no proper terminal condition
|
||||
metrics.record_error("early_termination")
|
||||
response.failure("Stream ended without terminal event")
|
||||
else:
|
||||
response.failure("No events received")
|
||||
|
||||
except (
|
||||
requests.exceptions.ConnectTimeout,
|
||||
requests.exceptions.ReadTimeout,
|
||||
) as e:
|
||||
metrics.record_error("timeout")
|
||||
response.failure(f"Timeout: {e}")
|
||||
except (
|
||||
requests.exceptions.ConnectionError,
|
||||
requests.exceptions.RequestException,
|
||||
) as e:
|
||||
metrics.record_error("connection_error")
|
||||
response.failure(f"Connection error: {e}")
|
||||
except Exception as e:
|
||||
response.failure(str(e))
|
||||
raise
|
||||
finally:
|
||||
metrics.connection_ended()
|
||||
|
||||
# Record stream metrics
|
||||
if event_count > 0:
|
||||
stream_duration = (time.time() - start_time) * 1000
|
||||
stream_metrics = StreamMetrics(
|
||||
stream_duration=stream_duration,
|
||||
events_count=event_count,
|
||||
bytes_received=bytes_received,
|
||||
ttfe=ttfe,
|
||||
inter_event_times=inter_event_times,
|
||||
)
|
||||
metrics.record_stream_metrics(stream_metrics)
|
||||
logger.debug(
|
||||
f"Stream completed: {event_count} events, {stream_duration:.1f}ms, success={request_success}"
|
||||
)
|
||||
else:
|
||||
logger.warning("No events received in stream")
|
||||
|
||||
|
||||
# Event handlers
|
||||
@events.test_start.add_listener # type: ignore[misc]
|
||||
def on_test_start(environment: object, **kwargs: object) -> None:
|
||||
logger.info("=" * 80)
|
||||
logger.info(" " * 25 + "DIFY SSE BENCHMARK - REAL-TIME METRICS")
|
||||
logger.info("=" * 80)
|
||||
logger.info(f"Started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
logger.info("=" * 80)
|
||||
|
||||
# Periodic stats reporting
|
||||
def report_stats() -> None:
|
||||
if not hasattr(environment, 'runner'):
|
||||
return
|
||||
runner = environment.runner
|
||||
while hasattr(runner, 'state') and runner.state not in ["stopped", "stopping"]:
|
||||
time.sleep(5) # Report every 5 seconds
|
||||
if hasattr(runner, 'state') and runner.state == "running":
|
||||
stats = metrics.get_stats()
|
||||
|
||||
# Only log on master node in distributed mode
|
||||
is_master = not getattr(environment.runner, "worker_id", None) if hasattr(environment, 'runner') else True
|
||||
if is_master:
|
||||
# Clear previous lines and show updated stats
|
||||
logger.info("\n" + "=" * 80)
|
||||
logger.info(
|
||||
f"{'METRIC':<25} {'CURRENT':>15} {'RATE (10s)':>15} {'AVG (overall)':>15} {'TOTAL':>12}"
|
||||
)
|
||||
logger.info("-" * 80)
|
||||
|
||||
# Active SSE Connections
|
||||
logger.info(
|
||||
f"{'Active SSE Connections':<25} {stats.active_connections:>15,d} {'-':>15} {'-':>12} {'-':>12}"
|
||||
)
|
||||
|
||||
# New Connection Rate
|
||||
logger.info(
|
||||
f"{'New Connections':<25} {'-':>15} {stats.connection_rate:>13.2f}/s {stats.overall_conn_rate:>13.2f}/s {stats.total_connections:>12,d}"
|
||||
)
|
||||
|
||||
# Event Throughput
|
||||
logger.info(
|
||||
f"{'Event Throughput':<25} {'-':>15} {stats.event_rate:>13.2f}/s {stats.overall_event_rate:>13.2f}/s {stats.total_events:>12,d}"
|
||||
)
|
||||
|
||||
logger.info("-" * 80)
|
||||
logger.info(
|
||||
f"{'TIME TO FIRST EVENT':<25} {'AVG':>15} {'P50':>10} {'P95':>10} {'MIN':>10} {'MAX':>10}"
|
||||
)
|
||||
logger.info(
|
||||
f"{'(TTFE in ms)':<25} {stats.ttfe_avg:>15.1f} {stats.ttfe_p50:>10.1f} {stats.ttfe_p95:>10.1f} {stats.ttfe_min:>10.1f} {stats.ttfe_max:>10.1f}"
|
||||
)
|
||||
logger.info(f"{'Window Samples':<25} {stats.ttfe_samples:>15,d} (last {min(10000, stats.ttfe_total_samples):,d} samples)")
|
||||
logger.info(f"{'Total Samples':<25} {stats.ttfe_total_samples:>15,d}")
|
||||
|

                    # Inter-event latency
                    if stats.inter_event_latency_avg > 0:
                        logger.info("-" * 80)
                        logger.info(
                            f"{'INTER-EVENT LATENCY':<25} {'AVG':>15} {'P50':>10} {'P95':>10}"
                        )
                        logger.info(
                            f"{'(ms between events)':<25} {stats.inter_event_latency_avg:>15.1f} {stats.inter_event_latency_p50:>10.1f} {stats.inter_event_latency_p95:>10.1f}"
                        )

                    # Error stats
                    if any(stats.error_counts.values()):
                        logger.info("-" * 80)
                        logger.info(f"{'ERROR TYPE':<25} {'COUNT':>15}")
                        for error_type, count in stats.error_counts.items():
                            if isinstance(count, int) and count > 0:
                                logger.info(f"{error_type:<25} {count:>15,d}")

                    logger.info("=" * 80)

                    # Show Locust stats summary
                    if hasattr(environment, 'stats') and hasattr(environment.stats, 'total'):
                        total = environment.stats.total
                        if hasattr(total, 'num_requests') and total.num_requests > 0:
                            logger.info(
                                f"{'LOCUST STATS':<25} {'Requests':>12} {'Fails':>8} {'Avg (ms)':>12} {'Min':>8} {'Max':>8}"
                            )
                            logger.info("-" * 80)
                            logger.info(
                                f"{'Aggregated':<25} {total.num_requests:>12,d} "
                                f"{total.num_failures:>8,d} "
                                f"{total.avg_response_time:>12.1f} "
                                f"{total.min_response_time:>8.0f} "
                                f"{total.max_response_time:>8.0f}"
                            )
                            logger.info("=" * 80)

    threading.Thread(target=report_stats, daemon=True).start()
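    # daemon=True keeps the reporter from blocking shutdown: the thread dies
    # with the process instead of holding the run open after test_stop.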


@events.test_stop.add_listener  # type: ignore[misc]
def on_test_stop(environment: object, **kwargs: object) -> None:
    stats = metrics.get_stats()
    test_duration = time.time() - metrics.start_time
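    # metrics.start_time is assumed to be stamped when the module-level
    # collector is created, so this is wall-clock duration for the whole run.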

    # Log final results
    logger.info("\n" + "=" * 80)
    logger.info(" " * 30 + "FINAL BENCHMARK RESULTS")
    logger.info("=" * 80)
    logger.info(f"Test Duration: {test_duration:.1f} seconds")
    logger.info("-" * 80)

    logger.info("")
    logger.info("CONNECTIONS")
    logger.info(f" {'Total Connections:':<30} {stats.total_connections:>10,d}")
    logger.info(f" {'Final Active:':<30} {stats.active_connections:>10,d}")
    logger.info(f" {'Average Rate:':<30} {stats.overall_conn_rate:>10.2f} conn/s")

    logger.info("")
    logger.info("EVENTS")
    logger.info(f" {'Total Events Received:':<30} {stats.total_events:>10,d}")
    logger.info(
        f" {'Average Throughput:':<30} {stats.overall_event_rate:>10.2f} events/s"
    )
    logger.info(
        f" {'Final Rate (10s window):':<30} {stats.event_rate:>10.2f} events/s"
    )

    logger.info("")
    logger.info("STREAM METRICS")
    logger.info(f" {'Avg Stream Duration:':<30} {stats.stream_duration_avg:>10.1f} ms")
    logger.info(f" {'P50 Stream Duration:':<30} {stats.stream_duration_p50:>10.1f} ms")
    logger.info(f" {'P95 Stream Duration:':<30} {stats.stream_duration_p95:>10.1f} ms")
    logger.info(
        f" {'Avg Events per Stream:':<30} {stats.events_per_stream_avg:>10.1f}"
    )

    logger.info("")
    logger.info("INTER-EVENT LATENCY")
    logger.info(f" {'Average:':<30} {stats.inter_event_latency_avg:>10.1f} ms")
    logger.info(f" {'Median (P50):':<30} {stats.inter_event_latency_p50:>10.1f} ms")
    logger.info(f" {'95th Percentile:':<30} {stats.inter_event_latency_p95:>10.1f} ms")

    logger.info("")
    logger.info("TIME TO FIRST EVENT (ms)")
    logger.info(f" {'Average:':<30} {stats.ttfe_avg:>10.1f} ms")
    logger.info(f" {'Median (P50):':<30} {stats.ttfe_p50:>10.1f} ms")
    logger.info(f" {'95th Percentile:':<30} {stats.ttfe_p95:>10.1f} ms")
    logger.info(f" {'Minimum:':<30} {stats.ttfe_min:>10.1f} ms")
    logger.info(f" {'Maximum:':<30} {stats.ttfe_max:>10.1f} ms")
    logger.info(f" {'Window Samples:':<30} {stats.ttfe_samples:>10,d} (last {min(10000, stats.ttfe_total_samples):,d})")
    logger.info(f" {'Total Samples:':<30} {stats.ttfe_total_samples:>10,d}")

    # Error summary
    if any(stats.error_counts.values()):
        logger.info("")
        logger.info("ERRORS")
        for error_type, count in stats.error_counts.items():
            if isinstance(count, int) and count > 0:
                logger.info(f" {error_type:<30} {count:>10,d}")

    logger.info("=" * 80 + "\n")

    # Export machine-readable report (only on master node)
    is_master = not getattr(environment.runner, 'worker_id', None) if hasattr(environment, 'runner') else True
    if is_master:
        export_json_report(stats, test_duration, environment)


def export_json_report(stats: MetricsSnapshot, duration: float, environment: object) -> None:
    """Export metrics to JSON file for CI/CD analysis"""
    reports_dir = Path(__file__).parent / "reports"
    reports_dir.mkdir(exist_ok=True)

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    report_file = reports_dir / f"sse_metrics_{timestamp}.json"

    # Access environment.stats.total attributes safely
    locust_stats: LocustStats | None = None
    if hasattr(environment, 'stats') and hasattr(environment.stats, 'total'):
        total = environment.stats.total
        if hasattr(total, 'num_requests') and total.num_requests > 0:
            locust_stats = LocustStats(
                total_requests=total.num_requests,
                total_failures=total.num_failures,
                avg_response_time=total.avg_response_time,
                min_response_time=total.min_response_time,
                max_response_time=total.max_response_time,
            )

    report_data = ReportData(
        timestamp=datetime.now().isoformat(),
        duration_seconds=duration,
        metrics=asdict(stats),  # type: ignore[arg-type]
        locust_stats=locust_stats,
    )
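
    # NOTE: json.dump below assumes report_data is a plain dict at runtime
    # (i.e. ReportData/LocustStats are TypedDicts); if they were dataclasses,
    # they would need asdict() like MetricsSnapshot does above.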
    with open(report_file, "w") as f:
        json.dump(report_data, f, indent=2)

    logger.info(f"Exported metrics to {report_file}")
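

# For reference, a report written by export_json_report looks roughly like this
# (keys from the code above; values are illustrative):
#
# {
#   "timestamp": "2025-01-01T12:00:00.000000",
#   "duration_seconds": 300.0,
#   "metrics": {"total_connections": 1200, "total_events": 48000, "...": "..."},
#   "locust_stats": {"total_requests": 1200, "total_failures": 0, "...": "..."}
# }
#
# A typical headless run (the locustfile name and host below are assumptions,
# not fixed by this snippet) might look like:
#
#   locust -f sse_benchmark.py --headless -u 50 -r 5 -t 5m --host http://localhost:5001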