Compare commits

...

3 Commits

Author SHA1 Message Date
John Wang
b51b0e7837 feat: add callback qualification verify api for spark 2023-09-09 23:36:47 +08:00
John Wang
70ecf89428 fix: hf hosted inference llm call 2023-09-09 00:28:49 +08:00
John Wang
0aa81a4b76 fix: hf hosted inference enabled check 2023-09-09 00:05:44 +08:00
5 changed files with 123 additions and 5 deletions

View File

@@ -285,6 +285,20 @@ class ModelProviderFreeQuotaSubmitApi(Resource):
return result
class ModelProviderFreeQuotaQualificationVerifyApi(Resource):
    """Endpoint that checks whether the current workspace qualifies for a
    provider's free quota program.

    Delegates entirely to ProviderService; the response payload is whatever
    the service returns.
    """

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, provider_name: str):
        # No intermediate state to keep — hand the tenant/provider pair
        # straight to the service layer and return its result verbatim.
        return ProviderService().free_quota_qualification_verify(
            tenant_id=current_user.current_tenant_id,
            provider_name=provider_name,
        )
# Route registrations for the workspace model-provider endpoints.
api.add_resource(ModelProviderListApi, '/workspaces/current/model-providers')
api.add_resource(ModelProviderValidateApi, '/workspaces/current/model-providers/<string:provider_name>/validate')
api.add_resource(ModelProviderUpdateApi, '/workspaces/current/model-providers/<string:provider_name>')
@@ -300,3 +314,5 @@ api.add_resource(ModelProviderPaymentCheckoutUrlApi,
'/workspaces/current/model-providers/<string:provider_name>/checkout-url')
# Free-quota routes: submission of an application, and qualification check.
api.add_resource(ModelProviderFreeQuotaSubmitApi,
                 '/workspaces/current/model-providers/<string:provider_name>/free-quota-submit')
api.add_resource(ModelProviderFreeQuotaQualificationVerifyApi,
                 '/workspaces/current/model-providers/<string:provider_name>/free-quota-qualification-verify')

View File

@@ -1,6 +1,5 @@
from typing import List, Optional, Any
from langchain import HuggingFaceHub
from langchain.callbacks.manager import Callbacks
from langchain.schema import LLMResult
@@ -9,6 +8,7 @@ from core.model_providers.models.llm.base import BaseLLM
from core.model_providers.models.entity.message import PromptMessage
from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
from core.third_party.langchain.llms.huggingface_endpoint_llm import HuggingFaceEndpointLLM
from core.third_party.langchain.llms.huggingface_hub_llm import HuggingFaceHubLLM
class HuggingfaceHubModel(BaseLLM):
@@ -31,7 +31,7 @@ class HuggingfaceHubModel(BaseLLM):
streaming=streaming
)
else:
client = HuggingFaceHub(
client = HuggingFaceHubLLM(
repo_id=self.name,
task=self.credentials['task_type'],
model_kwargs=provider_model_kwargs,
@@ -88,4 +88,6 @@ class HuggingfaceHubModel(BaseLLM):
if 'baichuan' in self.name.lower():
return False
return True
return True
else:
return False

View File

@@ -89,7 +89,8 @@ class HuggingfaceHubProvider(BaseModelProvider):
raise CredentialsValidateFailedError('Task Type must be provided.')
if credentials['task_type'] not in ("text2text-generation", "text-generation", "summarization"):
raise CredentialsValidateFailedError('Task Type must be one of text2text-generation, text-generation, summarization.')
raise CredentialsValidateFailedError('Task Type must be one of text2text-generation, '
'text-generation, summarization.')
try:
llm = HuggingFaceEndpointLLM(

View File

@@ -0,0 +1,62 @@
from typing import Dict, Optional, List, Any
from huggingface_hub import HfApi, InferenceApi
from langchain import HuggingFaceHub
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_hub import VALID_TASKS
from pydantic import root_validator
from langchain.utils import get_from_dict_or_env
class HuggingFaceHubLLM(HuggingFaceHub):
    """HuggingFaceHub models.

    To use, you should have the ``huggingface_hub`` python package installed, and the
    environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Only supports `text-generation`, `text2text-generation` and `summarization` for now.

    Example:
        .. code-block:: python

            hf = HuggingFaceHubLLM(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
    """

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment.

        Unlike the base-class validator, this builds the client without the
        local VALID_TASKS check (task validity is checked against the hub in
        ``_call`` instead) and sets ``wait_for_model`` to False.
        """
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )

        client = InferenceApi(
            repo_id=values["repo_id"],
            token=huggingfacehub_api_token,
            task=values.get("task"),
        )
        client.options = {"wait_for_model": False, "use_gpu": False}

        values["client"] = client
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the Inference API after verifying the model is usable.

        Raises:
            ValueError: if the repo does not exist, its hosted inference is
                disabled, or its pipeline tag is not a supported task.
        """
        hfapi = HfApi(token=self.huggingfacehub_api_token)
        model_info = hfapi.model_info(repo_id=self.repo_id)
        if not model_info:
            raise ValueError(f"Model {self.repo_id} not found.")

        # cardData is None for repos without a model card; membership-testing
        # None would raise TypeError, so guard it first.
        card_data = model_info.cardData
        if card_data and 'inference' in card_data and not card_data['inference']:
            raise ValueError(f"Inference API has been turned off for this model {self.repo_id}.")

        if model_info.pipeline_tag not in VALID_TASKS:
            raise ValueError(f"Model {self.repo_id} is not a valid task, "
                             f"must be one of {VALID_TASKS}.")

        return super()._call(prompt, stop, run_manager, **kwargs)

View File

@@ -518,7 +518,8 @@ class ProviderService:
def free_quota_submit(self, tenant_id: str, provider_name: str):
api_key = os.environ.get("FREE_QUOTA_APPLY_API_KEY")
api_url = os.environ.get("FREE_QUOTA_APPLY_URL")
api_base_url = os.environ.get("FREE_QUOTA_APPLY_BASE_URL")
api_url = api_base_url + '/api/v1/providers/apply'
headers = {
'Content-Type': 'application/json',
@@ -546,3 +547,39 @@ class ProviderService:
'type': rst['type'],
'result': 'success'
}
def free_quota_qualification_verify(self, tenant_id: str, provider_name: str):
    """Ask the free-quota apply service whether this workspace qualifies
    for the given provider's free quota.

    Returns a dict with 'result', 'provider_name' and a boolean 'flag';
    when not qualified, the service-supplied 'reason' is included as well.

    Raises:
        ValueError: on a non-OK HTTP response or a non-'success' API code.
    """
    api_key = os.environ.get("FREE_QUOTA_APPLY_API_KEY")
    api_base_url = os.environ.get("FREE_QUOTA_APPLY_BASE_URL")
    api_url = api_base_url + '/api/v1/providers/qualification-verify'

    response = requests.post(
        api_url,
        headers={
            'Content-Type': 'application/json',
            'Authorization': f"Bearer {api_key}"
        },
        json={'workspace_id': tenant_id, 'provider_name': provider_name},
    )

    if not response.ok:
        logging.error(f"Request FREE QUOTA APPLY SERVER Error: {response.status_code} ")
        raise ValueError(f"Error: {response.status_code} ")

    rst = response.json()
    if rst["code"] != 'success':
        raise ValueError(
            f"error: {rst['message']}"
        )

    data = rst['data']
    # Treat only a literal True as qualified, mirroring the upstream contract.
    qualified = data['qualified'] is True
    result = {
        'result': 'success',
        'provider_name': provider_name,
        'flag': qualified
    }
    if not qualified:
        result['reason'] = data['reason']
    return result