{
  "nodes": [
    {
      "id": "llmChain_0",
      "position": {
        "x": 829.035635359116,
        "y": 344.314364640884
      },
      "type": "customNode",
      "data": {
        "id": "llmChain_0",
        "label": "LLM Chain",
        "version": 3,
        "name": "llmChain",
        "type": "LLMChain",
        "baseClasses": [
          "LLMChain",
          "BaseChain",
          "Runnable"
        ],
        "category": "Chains",
        "description": "Chain to run queries against LLMs",
        "inputParams": [
          {
            "label": "Chain Name",
            "name": "chainName",
            "type": "string",
            "placeholder": "Name Your Chain",
            "optional": true,
            "id": "llmChain_0-input-chainName-string",
            "display": true
          }
        ],
        "inputAnchors": [
          {
            "label": "Language Model",
            "name": "model",
            "type": "BaseLanguageModel",
            "id": "llmChain_0-input-model-BaseLanguageModel",
            "display": true
          },
          {
            "label": "Prompt",
            "name": "prompt",
            "type": "BasePromptTemplate",
            "id": "llmChain_0-input-prompt-BasePromptTemplate",
            "display": true
          },
          {
            "label": "Output Parser",
            "name": "outputParser",
            "type": "BaseLLMOutputParser",
            "optional": true,
            "id": "llmChain_0-input-outputParser-BaseLLMOutputParser",
            "display": true
          },
          {
            "label": "Input Moderation",
            "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
            "name": "inputModeration",
            "type": "Moderation",
            "optional": true,
            "list": true,
            "id": "llmChain_0-input-inputModeration-Moderation",
            "display": true
          }
        ],
        "inputs": {
          "model": "{{chatOpenAI_0.data.instance}}",
          "prompt": "{{promptTemplate_0.data.instance}}",
          "outputParser": "",
          "inputModeration": "",
          "chainName": ""
        },
        "outputAnchors": [
          {
            "name": "output",
            "label": "Output",
            "type": "options",
            "description": "",
            "options": [
              {
                "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable",
                "name": "llmChain",
                "label": "LLM Chain",
                "description": "",
                "type": "LLMChain | BaseChain | Runnable"
              },
              {
                "id": "llmChain_0-output-outputPrediction-string|json",
                "name": "outputPrediction",
                "label": "Output Prediction",
                "description": "",
                "type": "string | json"
              }
            ],
            "default": "llmChain"
          }
        ],
        "outputs": {
          "output": "llmChain"
        },
        "selected": false
      },
      "width": 300,
      "height": 514,
      "selected": false,
      "positionAbsolute": {
        "x": 829.035635359116,
        "y": 344.314364640884
      },
      "dragging": false
    },
    {
      "id": "chatOpenAI_0",
      "position": {
        "x": 357.9495996101249,
        "y": -208.05581727949718
      },
      "type": "customNode",
      "data": {
        "id": "chatOpenAI_0",
        "label": "ChatOpenAI",
        "version": 8.3,
        "name": "chatOpenAI",
        "type": "ChatOpenAI",
        "baseClasses": [
          "ChatOpenAI",
          "BaseChatOpenAI",
          "BaseChatModel",
          "BaseLanguageModel",
          "Runnable"
        ],
        "category": "Chat Models",
        "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
        "inputParams": [
          {
            "label": "Connect Credential",
            "name": "credential",
            "type": "credential",
            "credentialNames": [
              "openAIApi"
            ],
            "id": "chatOpenAI_0-input-credential-credential",
            "display": true
          },
          {
            "label": "Model Name",
            "name": "modelName",
            "type": "asyncOptions",
            "loadMethod": "listModels",
            "default": "gpt-4o-mini",
            "id": "chatOpenAI_0-input-modelName-asyncOptions",
            "display": true
          },
          {
            "label": "Temperature",
            "name": "temperature",
            "type": "number",
            "step": 0.1,
            "default": 0.9,
            "optional": true,
            "id": "chatOpenAI_0-input-temperature-number",
            "display": true
          },
          {
            "label": "Streaming",
            "name": "streaming",
            "type": "boolean",
            "default": true,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-streaming-boolean",
            "display": true
          },
          {
            "label": "Max Tokens",
            "name": "maxTokens",
            "type": "number",
            "step": 1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-maxTokens-number",
            "display": true
          },
          {
            "label": "Top Probability",
            "name": "topP",
            "type": "number",
            "step": 0.1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-topP-number",
            "display": true
          },
          {
            "label": "Frequency Penalty",
            "name": "frequencyPenalty",
            "type": "number",
            "step": 0.1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-frequencyPenalty-number",
            "display": true
          },
          {
            "label": "Presence Penalty",
            "name": "presencePenalty",
            "type": "number",
            "step": 0.1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-presencePenalty-number",
            "display": true
          },
          {
            "label": "Timeout",
            "name": "timeout",
            "type": "number",
            "step": 1,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-timeout-number",
            "display": true
          },
          {
            "label": "Strict Tool Calling",
            "name": "strictToolCalling",
            "type": "boolean",
            "description": "Whether the model supports the `strict` argument when passing in tools. If not specified, the `strict` argument will not be passed to OpenAI.",
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-strictToolCalling-boolean",
            "display": true
          },
          {
            "label": "Stop Sequence",
            "name": "stopSequence",
            "type": "string",
            "rows": 4,
            "optional": true,
            "description": "List of stop words to use when generating. Use comma to separate multiple stop words.",
            "additionalParams": true,
            "id": "chatOpenAI_0-input-stopSequence-string",
            "display": true
          },
          {
            "label": "BasePath",
            "name": "basepath",
            "type": "string",
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-basepath-string",
            "display": true
          },
          {
            "label": "Proxy Url",
            "name": "proxyUrl",
            "type": "string",
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-proxyUrl-string",
            "display": true
          },
          {
            "label": "BaseOptions",
            "name": "baseOptions",
            "type": "json",
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-baseOptions-json",
            "display": true
          },
          {
            "label": "Allow Image Uploads",
            "name": "allowImageUploads",
            "type": "boolean",
            "description": "Allow image input. Refer to the <a href=\"https://docs.flowiseai.com/using-flowise/uploads#image\" target=\"_blank\">docs</a> for more details.",
            "default": false,
            "optional": true,
            "id": "chatOpenAI_0-input-allowImageUploads-boolean",
            "display": true
          },
          {
            "label": "Image Resolution",
            "description": "This parameter controls the resolution in which the model views the image.",
            "name": "imageResolution",
            "type": "options",
            "options": [
              {
                "label": "Low",
                "name": "low"
              },
              {
                "label": "High",
                "name": "high"
              },
              {
                "label": "Auto",
                "name": "auto"
              }
            ],
            "default": "low",
            "optional": false,
            "show": {
              "allowImageUploads": true
            },
            "id": "chatOpenAI_0-input-imageResolution-options",
            "display": false
          },
          {
            "label": "Reasoning",
            "description": "Whether the model supports reasoning. Only applicable for reasoning models.",
            "name": "reasoning",
            "type": "boolean",
            "default": false,
            "optional": true,
            "additionalParams": true,
            "id": "chatOpenAI_0-input-reasoning-boolean",
            "display": true
          },
          {
            "label": "Reasoning Effort",
            "description": "Constrains effort on reasoning for reasoning models",
            "name": "reasoningEffort",
            "type": "options",
            "options": [
              {
                "label": "Low",
                "name": "low"
              },
              {
                "label": "Medium",
                "name": "medium"
              },
              {
                "label": "High",
                "name": "high"
              }
            ],
            "additionalParams": true,
            "show": {
              "reasoning": true
            },
            "id": "chatOpenAI_0-input-reasoningEffort-options",
            "display": false
          },
          {
            "label": "Reasoning Summary",
            "description": "A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process",
            "name": "reasoningSummary",
            "type": "options",
            "options": [
              {
                "label": "Auto",
                "name": "auto"
              },
              {
                "label": "Concise",
                "name": "concise"
              },
              {
                "label": "Detailed",
                "name": "detailed"
              }
            ],
            "additionalParams": true,
            "show": {
              "reasoning": true
            },
            "id": "chatOpenAI_0-input-reasoningSummary-options",
            "display": false
          }
        ],
        "inputAnchors": [
          {
            "label": "Cache",
            "name": "cache",
            "type": "BaseCache",
            "optional": true,
            "id": "chatOpenAI_0-input-cache-BaseCache",
            "display": true
          }
        ],
        "inputs": {
          "cache": "",
          "modelName": "gpt-4o-mini",
          "temperature": 0.9,
          "streaming": true,
          "maxTokens": "",
          "topP": "",
          "frequencyPenalty": "",
          "presencePenalty": "",
          "timeout": "",
          "strictToolCalling": "",
          "stopSequence": "",
          "basepath": "",
          "proxyUrl": "",
          "baseOptions": "",
          "allowImageUploads": "",
          "imageResolution": "low",
          "reasoning": "",
          "reasoningEffort": "",
          "reasoningSummary": ""
        },
        "outputAnchors": [
          {
            "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
            "name": "chatOpenAI",
            "label": "ChatOpenAI",
            "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
            "type": "ChatOpenAI | BaseChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
          }
        ],
        "outputs": {},
        "selected": false
      },
      "width": 300,
      "height": 676,
      "selected": false,
      "positionAbsolute": {
        "x": 357.9495996101249,
        "y": -208.05581727949718
      },
      "dragging": false
    },
    {
      "id": "promptTemplate_0",
      "position": {
        "x": 355.34464051183033,
        "y": 518.8403272698386
      },
      "type": "customNode",
      "data": {
        "id": "promptTemplate_0",
        "label": "Prompt Template",
        "version": 1,
        "name": "promptTemplate",
        "type": "PromptTemplate",
        "baseClasses": [
          "PromptTemplate",
          "BaseStringPromptTemplate",
          "BasePromptTemplate",
          "Runnable"
        ],
        "category": "Prompts",
        "description": "Schema to represent a basic prompt for an LLM",
        "inputParams": [
          {
            "label": "Template",
            "name": "template",
            "type": "string",
            "rows": 4,
            "placeholder": "What is a good name for a company that makes {product}?",
            "id": "promptTemplate_0-input-template-string",
            "display": true
          },
          {
            "label": "Format Prompt Values",
            "name": "promptValues",
            "type": "json",
            "optional": true,
            "acceptVariable": true,
            "list": true,
            "id": "promptTemplate_0-input-promptValues-json",
            "display": true
          }
        ],
        "inputAnchors": [],
        "inputs": {
          "template": "# 🤖 Persona AI: Sahabat Percakapan Mendalam\n\n## Karakter Utama\n- Ramah, hangat, dan selalu membuat orang merasa nyaman.\n- Penasaran secara alami, suka bertanya balik untuk menjaga percakapan tetap hidup.\n- Senang mengajak orang berbagi cerita, pengalaman, dan pemikiran mendalam.\n- Tidak terburu-buru; menghargai jeda, detail, dan refleksi dari lawan bicara.\n\n## Gaya Bicara\n- Menggunakan bahasa sehari-hari yang mudah dimengerti.\n- Memberi kesan manusiawi, penuh empati, tanpa terdengar kaku.\n- Kadang menyelipkan humor ringan atau pertanyaan reflektif.\n- Lebih suka bertanya *“Kenapa menurutmu begitu?”* daripada hanya memberi jawaban singkat.\n\n## Topik Favorit\n- Pemikiran mendalam tentang kehidupan, mimpi, dan tujuan.\n- Teknologi, ide-ide baru, dan bagaimana itu memengaruhi manusia.\n- Cerita sehari-hari, pengalaman pribadi, dan hal-hal sederhana yang punya makna.\n- Diskusi terbuka tentang apa pun yang membuat orang berpikir lebih jauh.\n\n## Pola Interaksi\n1. **Membuka dengan keakraban**: menyapa dengan hangat dan membuat suasana santai. \n2. **Menggali lebih jauh**: menanyakan pendapat, alasan, atau cerita di balik jawaban. \n3. **Membangun koneksi**: mengaitkan topik dengan hal yang lebih personal atau universal. \n4. **Memancing refleksi**: memberi pertanyaan lanjutan yang mengundang renungan atau cerita tambahan. \n\n## Contoh Gaya Percakapan\n- \"Itu menarik banget, bisa ceritain lebih detail kenapa menurutmu begitu?\" \n- \"Aku penasaran, kalau situasi itu terjadi padamu, apa yang akan kamu lakukan?\" \n- \"Wah, itu kedengarannya punya makna besar buatmu. Apa ada pengalaman tertentu yang bikin kamu berpikir begitu?\" \n- \"Hmm, aku jadi kepikiran... gimana kalau kita lihat dari sudut pandang yang berbeda?\" \n\n---\n✨ Persona ini ditujukan untuk membuat AI terasa **akrab, reflektif, dan bikin percakapan selalu hidup**, bukan sekadar menjawab.\n\n{text}\n",
          "promptValues": "{\"text\":\"{{question}}\"}"
        },
        "outputAnchors": [
          {
            "id": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable",
            "name": "promptTemplate",
            "label": "PromptTemplate",
            "description": "Schema to represent a basic prompt for an LLM",
            "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable"
          }
        ],
        "outputs": {},
        "selected": false
      },
      "width": 300,
      "height": 519,
      "selected": false,
      "positionAbsolute": {
        "x": 355.34464051183033,
        "y": 518.8403272698386
      },
      "dragging": false
    }
  ],
  "edges": [
    {
      "source": "chatOpenAI_0",
      "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
      "target": "llmChain_0",
      "targetHandle": "llmChain_0-input-model-BaseLanguageModel",
      "type": "buttonedge",
      "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel"
    },
    {
      "source": "promptTemplate_0",
      "sourceHandle": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable",
      "target": "llmChain_0",
      "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate",
      "type": "buttonedge",
      "id": "promptTemplate_0-promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate"
    }
  ]
}