Commit
Merge branch 'langgenius:main' into main
kazuhisa-wada authored Dec 16, 2024
2 parents d13f1bc + 4b402c4 commit b40a5dc
Showing 12 changed files with 266 additions and 75 deletions.
22 changes: 0 additions & 22 deletions api/core/app/apps/workflow/generate_task_pipeline.py
@@ -394,28 +394,6 @@ def _process_stream_response(
                 # save workflow app log
                 self._save_workflow_app_log(workflow_run)
 
-                yield self._workflow_finish_to_stream_response(
-                    task_id=self._application_generate_entity.task_id, workflow_run=workflow_run
-                )
-            elif isinstance(event, QueueWorkflowPartialSuccessEvent):
-                if not workflow_run:
-                    raise Exception("Workflow run not initialized.")
-
-                if not graph_runtime_state:
-                    raise Exception("Graph runtime state not initialized.")
-                workflow_run = self._handle_workflow_run_partial_success(
-                    workflow_run=workflow_run,
-                    start_at=graph_runtime_state.start_at,
-                    total_tokens=graph_runtime_state.total_tokens,
-                    total_steps=graph_runtime_state.node_run_steps,
-                    outputs=event.outputs,
-                    exceptions_count=event.exceptions_count,
-                    conversation_id=None,
-                    trace_manager=trace_manager,
-                )
-                # save workflow app log
-                self._save_workflow_app_log(workflow_run)
-
                 yield self._workflow_finish_to_stream_response(
                     task_id=self._application_generate_entity.task_id, workflow_run=workflow_run
                 )
7 changes: 6 additions & 1 deletion api/core/model_runtime/model_providers/baichuan/llm/llm.py
@@ -10,6 +10,7 @@
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
     PromptMessage,
+    PromptMessageContentType,
     PromptMessageTool,
     SystemPromptMessage,
     ToolPromptMessage,
@@ -105,7 +106,11 @@ def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict:
         if isinstance(message.content, str):
             message_dict = {"role": "user", "content": message.content}
         else:
-            raise ValueError("User message content must be str")
+            for message_content in message.content:
+                if message_content.type == PromptMessageContentType.TEXT:
+                    message_dict = {"role": "user", "content": message_content.data}
+                elif message_content.type == PromptMessageContentType.IMAGE:
+                    raise ValueError("Content object type not support image_url")
     elif isinstance(message, AssistantPromptMessage):
         message = cast(AssistantPromptMessage, message)
         message_dict = {"role": "assistant", "content": message.content}
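To make the new branch concrete: a user message whose content is a list of typed parts now has its text parts converted, while image parts are rejected (this Baichuan chat path is text-only). Below is a minimal, self-contained sketch; `PromptMessageContentType` and `ContentPart` here are simplified stand-ins for the real entity classes:

```python
from dataclasses import dataclass
from enum import Enum


class PromptMessageContentType(Enum):  # stand-in for the imported enum
    TEXT = "text"
    IMAGE = "image"


@dataclass
class ContentPart:  # stand-in for a prompt-message content object
    type: PromptMessageContentType
    data: str


def convert_user_content(content) -> dict:
    if isinstance(content, str):
        return {"role": "user", "content": content}
    message_dict = {}
    for part in content:  # mirrors the loop added in the diff
        if part.type == PromptMessageContentType.TEXT:
            message_dict = {"role": "user", "content": part.data}
        elif part.type == PromptMessageContentType.IMAGE:
            raise ValueError("Content object type not support image_url")
    return message_dict


print(convert_user_content([ContentPart(PromptMessageContentType.TEXT, "hi")]))
# -> {'role': 'user', 'content': 'hi'}
```

Note that, as in the committed code, a later TEXT part reassigns `message_dict` rather than appending, so only the last text segment of a multi-part message survives.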
3 changes: 3 additions & 0 deletions api/core/model_runtime/model_providers/deepseek/llm/llm.py
@@ -24,6 +24,9 @@ def _invoke(
         user: Optional[str] = None,
     ) -> Union[LLMResult, Generator]:
         self._add_custom_parameters(credentials)
+        # {"response_format": "xx"} needs to be converted to {"response_format": {"type": "xx"}}
+        if "response_format" in model_parameters:
+            model_parameters["response_format"] = {"type": model_parameters.get("response_format")}
         return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)
 
     def validate_credentials(self, model: str, credentials: dict) -> None:
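In isolation, the added conversion does the following (runnable sketch; the key name and shapes are exactly those in the diff):

```python
# Flat string form coming from the parameter UI:
model_parameters = {"temperature": 0.7, "response_format": "json_object"}

# The added lines wrap the string into the object form the
# OpenAI-compatible DeepSeek API expects: {"type": "..."}.
if "response_format" in model_parameters:
    model_parameters["response_format"] = {"type": model_parameters.get("response_format")}

print(model_parameters)
# -> {'temperature': 0.7, 'response_format': {'type': 'json_object'}}
```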
@@ -68,7 +68,12 @@ def convert_prompt_message_to_maas_message(message: PromptMessage) -> dict:
         content = []
         for message_content in message.content:
             if message_content.type == PromptMessageContentType.TEXT:
-                raise ValueError("Content object type only support image_url")
+                content.append(
+                    {
+                        "type": "text",
+                        "text": message_content.data,
+                    }
+                )
             elif message_content.type == PromptMessageContentType.IMAGE:
                 message_content = cast(ImagePromptMessageContent, message_content)
                 image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data)
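Before this fix, any text part in a list-style message raised the `ValueError` shown above; now it becomes a standard `{"type": "text", "text": ...}` entry alongside image parts. One detail from the unchanged image branch worth seeing in isolation: the `data:` URL prefix is stripped before the base64 payload is used (runnable sketch reusing the exact regex from the diff):

```python
import re

data_url = "data:image/png;base64,iVBORw0KGgoAAA"
# Drop the "data:image/<format>;base64," prefix, keeping only the raw payload.
image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", data_url)
print(image_data)  # -> iVBORw0KGgoAAA
```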
66 changes: 66 additions & 0 deletions api/core/model_runtime/model_providers/x/llm/grok-2-1212.yaml
@@ -0,0 +1,66 @@
model: grok-2-1212
label:
  en_US: grok-2-1212
model_type: llm
features:
  - agent-thought
  - tool-call
  - multi-tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 2.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: 0
    max: 2.0
    precision: 1
    required: false
    help:
      en_US: "Number between 0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
      zh_Hans: "介于0和2.0之间的数字。正值会根据新标记在文本中迄今为止的现有频率来惩罚它们,从而降低模型一字不差地重复同一句话的可能性。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"
@@ -0,0 +1,64 @@
model: grok-2-vision-1212
label:
  en_US: grok-2-vision-1212
model_type: llm
features:
  - agent-thought
  - vision
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 2.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: 0
    max: 2.0
    precision: 1
    required: false
    help:
      en_US: "Number between 0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
      zh_Hans: "介于0和2.0之间的数字。正值会根据新标记在文本中迄今为止的现有频率来惩罚它们,从而降低模型一字不差地重复同一句话的可能性。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"
@@ -1,6 +1,6 @@
 model: grok-beta
 label:
-  en_US: Grok Beta
+  en_US: grok-beta
 model_type: llm
 features:
   - agent-thought
@@ -1,6 +1,6 @@
 model: grok-vision-beta
 label:
-  en_US: Grok Vision Beta
+  en_US: grok-vision-beta
 model_type: llm
 features:
   - agent-thought
75 changes: 41 additions & 34 deletions api/core/workflow/nodes/http_request/executor.py
@@ -37,7 +37,7 @@
 class Executor:
     method: Literal["get", "head", "post", "put", "delete", "patch"]
     url: str
-    params: Mapping[str, str] | None
+    params: list[tuple[str, str]] | None
     content: str | bytes | None
     data: Mapping[str, Any] | None
     files: Mapping[str, tuple[str | None, bytes, str]] | None
@@ -67,7 +67,7 @@ def __init__(
         self.method = node_data.method
         self.auth = node_data.authorization
         self.timeout = timeout
-        self.params = {}
+        self.params = []
         self.headers = {}
         self.content = None
         self.files = None
@@ -89,14 +89,48 @@ def _init_url(self):
         self.url = self.variable_pool.convert_template(self.node_data.url).text
 
     def _init_params(self):
-        params = _plain_text_to_dict(self.node_data.params)
-        for key in params:
-            params[key] = self.variable_pool.convert_template(params[key]).text
-        self.params = params
+        """
+        Almost the same as _init_headers(), with two differences:
+        1. returns a list of tuples so that repeated keys survive, e.g. 'aa=1&aa=2'
+        2. a value may contain '\n', so we splitlines() first and then resolve each variable
+        """
+        result = []
+        for line in self.node_data.params.splitlines():
+            if not (line := line.strip()):
+                continue
+
+            key, *value = line.split(":", 1)
+            if not (key := key.strip()):
+                continue
+
+            value = value[0].strip() if value else ""
+            result.append(
+                (self.variable_pool.convert_template(key).text, self.variable_pool.convert_template(value).text)
+            )
+
+        self.params = result
 
     def _init_headers(self):
+        """
+        Convert the header string from the frontend into a dictionary.
+        Each line in the header string represents a key-value pair.
+        Keys and values are separated by ':'.
+        Empty values are allowed.
+        Examples:
+            'aa:bb\n cc:dd'  -> {'aa': 'bb', 'cc': 'dd'}
+            'aa:\n cc:dd\n'  -> {'aa': '', 'cc': 'dd'}
+            'aa\n cc : dd'   -> {'aa': '', 'cc': 'dd'}
+        """
         headers = self.variable_pool.convert_template(self.node_data.headers).text
-        self.headers = _plain_text_to_dict(headers)
+        self.headers = {
+            key.strip(): (value[0].strip() if value else "")
+            for line in headers.splitlines()
+            if line.strip()
+            for key, *value in [line.split(":", 1)]
+        }
@@ -288,33 +322,6 @@ def to_log(self):
         return raw
 
 
-def _plain_text_to_dict(text: str, /) -> dict[str, str]:
-    """
-    Convert a string of key-value pairs to a dictionary.
-    Each line in the input string represents a key-value pair.
-    Keys and values are separated by ':'.
-    Empty values are allowed.
-    Examples:
-        'aa:bb\n cc:dd'  -> {'aa': 'bb', 'cc': 'dd'}
-        'aa:\n cc:dd\n'  -> {'aa': '', 'cc': 'dd'}
-        'aa\n cc : dd'   -> {'aa': '', 'cc': 'dd'}
-    Args:
-        convert_text (str): The input string to convert.
-    Returns:
-        dict[str, str]: A dictionary of key-value pairs.
-    """
-    return {
-        key.strip(): (value[0].strip() if value else "")
-        for line in text.splitlines()
-        if line.strip()
-        for key, *value in [line.split(":", 1)]
-    }
-
-
 def _generate_random_string(n: int) -> str:
     """
     Generate a random string of lowercase ASCII letters.
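The type change from `Mapping[str, str]` to `list[tuple[str, str]]` is the point of the refactor: a mapping cannot represent a repeated query key, while httpx accepts a list of tuples for `params` and preserves duplicates. A runnable sketch (assuming, as the diff suggests, that `self.params` is handed to httpx unchanged):

```python
import httpx

# A dict would collapse the repeated "aa" key; a list of tuples keeps both.
params = [("aa", "1"), ("aa", "2"), ("bb", "3")]
request = httpx.Request("GET", "https://example.com/api", params=params)
print(request.url)  # -> https://example.com/api?aa=1&aa=2&bb=3
```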
6 changes: 4 additions & 2 deletions api/models/workflow.py
@@ -225,8 +225,10 @@ def tool_published(self) -> bool:
         from models.tools import WorkflowToolProvider
 
         return (
-            db.session.query(WorkflowToolProvider).filter(WorkflowToolProvider.app_id == self.app_id).first()
-            is not None
+            db.session.query(WorkflowToolProvider)
+            .filter(WorkflowToolProvider.tenant_id == self.tenant_id, WorkflowToolProvider.app_id == self.app_id)
+            .count()
+            > 0
         )
 
     @property
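The substantive fix is the added `tenant_id` filter, which scopes the lookup to the current tenant (defensive even where app ids are globally unique). Swapping `.first() is not None` for `.count() > 0` yields the same boolean; an `EXISTS` query is another idiomatic way to express it (sketch, assuming the standard SQLAlchemy session API; `self` is the model instance that owns the property above):

```python
from sqlalchemy import and_, exists

# Equivalent existence check using EXISTS instead of COUNT (sketch).
published = db.session.query(
    exists().where(
        and_(
            WorkflowToolProvider.tenant_id == self.tenant_id,
            WorkflowToolProvider.app_id == self.app_id,
        )
    )
).scalar()
```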