# ReMe 容器化部署踩坑记录 - Github:https://github.com/agentscope-ai/ReMe - Release:https://github.com/agentscope-ai/ReMe/releases - 使用的版本:https://github.com/agentscope-ai/ReMe/releases/tag/v0.3.1.8 - 快速上手:https://github.com/agentscope-ai/ReMe/blob/v0.3.1.8/docs/quick_start.md ## Dockerfile ``` FROM ubuntu:24.04 # 1. 更换为中科大 (USTC) 源 # Ubuntu 24.04 使用 DEB822 格式,配置文件在 /etc/apt/sources.list.d/ubuntu.sources # 我们需要将 URIs 字段中的默认地址替换为中科大镜像地址 RUN sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.ustc.edu.cn/ubuntu|g' /etc/apt/sources.list.d/ubuntu.sources && \ sed -i 's|http://security.ubuntu.com/ubuntu|http://mirrors.ustc.edu.cn/ubuntu|g' /etc/apt/sources.list.d/ubuntu.sources ENV DEBIAN_FRONTEND=noninteractive \ TZ=Asia/Shanghai # 2. 安装基础依赖 RUN apt-get update && apt-get install -y \ locales \ tzdata \ ca-certificates \ python3 \ python3-venv \ python3-pip \ && rm -rf /var/lib/apt/lists/* # 3. 设置语言环境 RUN locale-gen zh_CN.UTF-8 en_US.UTF-8 ENV LANG=zh_CN.UTF-8 \ LANGUAGE=zh_CN:zh \ LC_ALL=zh_CN.UTF-8 # 4. 设置时区 RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ dpkg-reconfigure -f noninteractive tzdata WORKDIR /opt # 5. 复制项目文件 COPY ReMe-0.3.1.8 ./reme # 6. 创建虚拟环境并安装依赖 RUN cd reme \ && python3 -m venv .venv \ && .venv/bin/pip install --no-cache-dir --upgrade pip \ && .venv/bin/pip install --no-cache-dir -e ".[light]" # 7. 
设置环境变量 ENV PATH="/opt/reme/.venv/bin:$PATH" WORKDIR /opt/reme ``` 构建镜像:`docker build -t reme:0.3.1.8 .`,构建完成后再用下面的 docker-compose 启动。 ## docker-compose.yaml ```yaml services: reme: image: reme:0.3.1.8 container_name: reme restart: on-failure:3 volumes: - ./local_vector_store:/opt/reme/local_vector_store - ./.env:/opt/reme/.env ports: - "28202:28202" command: [ "reme", "http.port=28202", "llm.default.model_name=qwen2.5", "embedding_model.default.model_name=bge-m3", "vector_store.default.backend=local" ] ``` ## .env 文件的内容 ``` LLM_API_KEY=sk-*** LLM_BASE_URL=http://IP:PORT/v1 EMBEDDING_API_KEY=sk-*** EMBEDDING_BASE_URL=http://IP:PORT/v1 ``` ## 通过 agentscope-java 构建的 demo ```java package ai.agentscope.example.memory; import ai.agentscope.example.conf.LlmConfig; import cn.hutool.core.date.DatePattern; import cn.hutool.core.util.StrUtil; import io.agentscope.core.ReActAgent; import io.agentscope.core.memory.LongTermMemoryMode; import io.agentscope.core.memory.reme.ReMeLongTermMemory; import io.agentscope.core.message.Msg; import io.agentscope.core.message.MsgRole; import lombok.RequiredArgsConstructor; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.CommandLineRunner; import org.springframework.boot.autoconfigure.condition.ConditionalOnBooleanProperty; import org.springframework.stereotype.Component; import java.time.LocalDateTime; import java.util.Optional; import java.util.Scanner; @Component @ConditionalOnBooleanProperty(value = "ENABLE_RE_ME_TEST") @RequiredArgsConstructor public class ReMeMemoryCmdRunner implements CommandLineRunner { private final LlmConfig llmConfig; @Value("${RE_ME_BASE_URL}") private String longTermMemoryBaseUrl; @Override public void run(String... 
args) throws Exception { System.out.println("-".repeat(40)); System.out.printf("Start %s%n", this.getClass().getSimpleName()); System.out.println("-".repeat(40)); Scanner scanner = new Scanner(System.in); for (; ; ) { System.out.print(">>>You:"); String input = scanner.nextLine(); if (StrUtil.isBlank(input)) { continue; } if ("q".equals(input) || "exit".equals(input)) { break; } Msg assistantMsg = this.agent().call(this.readUserMsg(input)).block(); System.out.printf(""" >>>LLM: %s %n""", Optional.ofNullable(assistantMsg).map(Msg::getTextContent).orElse("没有获取到任何输出")); } } private Msg readUserMsg(String input) { return Msg.builder() .timestamp(LocalDateTime.now().format(DatePattern.PURE_DATETIME_MS_FORMATTER)) .role(MsgRole.USER) .textContent(input) .build(); } private ReActAgent agent() { return ReActAgent.builder() .name("ReMeMemoryAgent") .model(llmConfig.model()) .longTermMemory(this.longTermMemory()) .longTermMemoryMode(LongTermMemoryMode.STATIC_CONTROL) .build(); } private ReMeLongTermMemory longTermMemory() { return ReMeLongTermMemory.builder() .userId("example_user_id") .apiBaseUrl(this.longTermMemoryBaseUrl) .build(); } } ``` ## 坑点1、因环境变量错误导致 API 调用失败(已解决) ``` 2026-04-07 15:56:51 | ERROR | base_embedding_model.py:126 | embedding model name=bge-negative-train encounter error with e=('Connection error.',) Traceback (most recent call last): File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_transports/default.py", line 101, in map_httpcore_exceptions yield File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_transports/default.py", line 394, in handle_async_request resp = await self._pool.handle_async_request(req) │ │ │ └ │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/httpcore/_async/connection_pool.py", line 207, in handle_async_request raise UnsupportedProtocol( └ httpcore.UnsupportedProtocol: Request URL is missing an 'http://' or 'https://' protocol. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/opt/reme/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1604, in request response = await self._client.send( │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1629, in send response = await self._send_handling_auth( │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1657, in _send_handling_auth response = await self._send_handling_redirects( │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1694, in _send_handling_redirects response = await self._send_single_request(request) │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1730, in _send_single_request response = await transport.handle_async_request(request) │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_transports/default.py", line 393, in handle_async_request with map_httpcore_exceptions(): └ File "/usr/lib/python3.12/contextlib.py", line 158, in __exit__ self.gen.throw(value) │ │ │ └ UnsupportedProtocol("Request URL is missing an 'http://' or 'https://' protocol.") │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/httpx/_transports/default.py", line 118, in map_httpcore_exceptions raise mapped_exc(message) from exc │ └ "Request URL is missing an 'http://' or 'https://' protocol." └ httpx.UnsupportedProtocol: Request URL is missing an 'http://' or 'https://' protocol. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/opt/reme/.venv/bin/reme", line 6, in sys.exit(main()) │ │ └ │ └ File "/opt/reme/reme_ai/main.py", line 231, in main app.run_service() │ └  └  File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/application.py", line 384, in run_service service.run() │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/service/http_service.py", line 123, in run uvicorn.run( │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/main.py", line 606, in run server.run() │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/server.py", line 75, in run return asyncio_run(self.serve(sockets=sockets), loop_factory=self.config.get_loop_factory()) │ │ │ │ │ │ └ │ │ │ │ │ └ │ │ │ │ └ │ │ │ └ None │ │ └ │ └ File "/usr/lib/python3.12/asyncio/runners.py", line 194, in run return runner.run(main) │ │ └ │ └ File "/usr/lib/python3.12/asyncio/runners.py", line 118, in run return self._loop.run_until_complete(task) │ │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/op/base_async_op.py", line 185, in async_call result = await self.async_execute() │ └ File "/opt/reme/reme_ai/summary/personal/load_today_memory_op.py", line 49, in async_execute today_memories = await self._retrieve_today_memories(workspace_id, user_name, top_k)  │ │ │ │ └ 50  │ │ │ └ 'user'  │ │ └ 'example_user_id'  │ └   └  File "/opt/reme/reme_ai/summary/personal/load_today_memory_op.py", line 87, in _retrieve_today_memories nodes: List[VectorNode] = await self.vector_store.async_search(  │ │ │ └   │ │ └   │ └   └ typing.List File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/vector_store/local_vector_store.py", line 512, in async_search return await super().async_search(query, workspace_id, top_k, filter_dict, **kwargs) │ │ │ │ └ {} │ │ │ └ {'memory_type': 'personal', 'target': 'user', 'created_date': '20260407'} │ │ └ 50 │ └ 'example_user_id' └ 
' ' File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/vector_store/memory_vector_store.py", line 673, in async_search query_vector = await self.async_get_embeddings(query) if use_vector_search else None │ │ │ └ True │ │ └ ' ' │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/vector_store/base_vector_store.py", line 123, in async_get_embeddings return await self.embedding_model.async_get_embeddings(query) │ │ │ └ ' ' │ │ └ │ └ > File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/base_embedding_model.py", line 123, in async_get_embeddings return await self._async_get_embeddings(input_text) │ │ └ ' ' │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/openai_compatible_embedding_model.py", line 124, in _async_get_embeddings completion = await self._async_client.embeddings.create( │ │ │ └ │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/resources/embeddings.py", line 259, in create return await self._post( │ └ > └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1884, in post return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) │ │ │ │ │ └ None │ │ │ │ └ False │ │ │ └ FinalRequestOptions(method='post', url='/embeddings', params={}, headers=NOT_GIVEN, max_retries=NOT_GIVEN, timeout=NOT_GIVEN,... │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1636, in request raise APIConnectionError(request=request) from err │ └ openai.APIConnectionError: Connection error. 2026-04-07 15:56:51 | ERROR | load_today_memory_op.py:103 | Error retrieving today's memories: Connection error. 
``` FlowLLM 的 openai_compatible_embedding_model.py 脚本中使用的环境变量是写死的 ```python self.api_key = api_key or os.getenv("FLOW_EMBEDDING_API_KEY", "") self.base_url = base_url or os.getenv("FLOW_EMBEDDING_BASE_URL", "") ``` 在 .env 文件添加上正确的环境变量即可 ``` LLM_API_KEY=sk-*** LLM_BASE_URL=http://IP:PORT/v1 EMBEDDING_API_KEY=sk-*** EMBEDDING_BASE_URL=http://IP:PORT/v1 FLOW_EMBEDDING_API_KEY=sk-*** FLOW_EMBEDDING_BASE_URL=http://IP:PORT/v1 FLOW_LLM_API_KEY=sk-*** FLOW_LLM_BASE_URL=http://IP:PORT/v1 ``` ## 坑点2、不知道问题源头(未解决) ``` 2026-04-08 10:33:27 | ERROR | base_embedding_model.py:126 | embedding model name=bge-m3 encounter error with e=('Error code: 400 - {\'error\': {\'message\': \'Model "bge-m3" does not support matryoshka representation, changing output dimensions will lead to poor results.\', \'type\': \'BadRequestError\', \'param\': \'\', \'code\': 400}}',) Traceback (most recent call last): File "/opt/reme/.venv/bin/reme", line 6, in sys.exit(main()) │ │ └ │ └ File "/opt/reme/reme_ai/main.py", line 231, in main app.run_service() │ └  └  File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/application.py", line 384, in run_service service.run() │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/service/http_service.py", line 123, in run uvicorn.run( │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/main.py", line 606, in run server.run() │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/server.py", line 75, in run return asyncio_run(self.serve(sockets=sockets), loop_factory=self.config.get_loop_factory()) │ │ │ │ │ │ └ │ │ │ │ │ └ │ │ │ │ └ │ │ │ └ None │ │ └ │ └ File "/usr/lib/python3.12/asyncio/runners.py", line 194, in run return runner.run(main) │ │ └ │ └ File "/usr/lib/python3.12/asyncio/runners.py", line 118, in run return self._loop.run_until_complete(task) │ │ │ └ │ └ > File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/base_embedding_model.py", line 123, in async_get_embeddings return 
await self._async_get_embeddings(input_text) │ │ └ ['Xiao Bai, 29, apples, Hami melons'] │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/openai_compatible_embedding_model.py", line 124, in _async_get_embeddings completion = await self._async_client.embeddings.create( │ │ │ └ │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/resources/embeddings.py", line 259, in create return await self._post( │ └ > └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1884, in post return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) │ │ │ │ │ └ None │ │ │ │ └ False │ │ │ └ FinalRequestOptions(method='post', url='/embeddings', params={}, headers=NOT_GIVEN, max_retries=NOT_GIVEN, timeout=NOT_GIVEN,... │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1669, in request raise self._make_status_error_from_response(err.response) from None │ └ openai.BadRequestError: Error code: 400 - {'error': {'message': 'Model "bge-m3" does not support matryoshka representation, changing output dimensions will lead to poor results.', 'type': 'BadRequestError', 'param': '', 'code': 400}} 2026-04-08 10:33:27 | ERROR | base_async_op.py:190 | op=update_vector_store_op async execute failed, error=('Error code: 400 - {\'error\': {\'message\': \'Model "bge-m3" does not support matryoshka representation, changing output dimensions will lead to poor results.\', \'type\': \'BadRequestError\', \'param\': \'\', \'code\': 400}}',) Traceback (most recent call last): File "/opt/reme/.venv/bin/reme", line 6, in sys.exit(main()) │ │ └ │ └ File "/opt/reme/reme_ai/main.py", line 231, in main app.run_service() │ └  └  File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/application.py", line 384, in run_service service.run() │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/service/http_service.py", line 123, in run uvicorn.run( │ └ File 
"/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/main.py", line 606, in run server.run() │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/server.py", line 75, in run return asyncio_run(self.serve(sockets=sockets), loop_factory=self.config.get_loop_factory()) │ │ │ │ │ │ └ │ │ │ │ │ └ │ │ │ │ └ │ │ │ └ None │ │ └ │ └ File "/usr/lib/python3.12/asyncio/runners.py", line 194, in run return runner.run(main) │ │ └ │ └ File "/usr/lib/python3.12/asyncio/runners.py", line 118, in run return self._loop.run_until_complete(task) │ │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/protocols/http/httptools_impl.py", line 416, in run_asgi result = await app( # type: ignore[func-returns-value] └ File "/opt/reme/.venv/lib/python3.12/site-packages/uvicorn/middleware/proxy_headers.py", line 60, in __call__ return await self.app(scope, receive, send) │ │ │ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/fastapi/applications.py", line 1163, in __call__ await super().__call__(scope, receive, send) │ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/middleware/errors.py", line 164, in __call__ await self.app(scope, receive, _send) │ │ │ │ └ ._send at 0x73b6148ef740> │ │ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/middleware/cors.py", line 88, in __call__ await self.app(scope, receive, send) │ │ │ │ └ ._send at 0x73b6148ef740> │ │ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/middleware/exceptions.py", line 63, in __call__ await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send) │ │ │ │ │ │ └ ._send at 0x73b6148ef740> │ │ │ │ │ └ │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/_exception_handler.py", line 42, in wrapped_app await app(scope, receive, sender) │ │ │ └ .wrapped_app..sender at 0x73b6148ef880> │ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/fastapi/middleware/asyncexitstack.py", line 18, in __call__ await self.app(scope, receive, 
send) │ │ │ │ └ .wrapped_app..sender at 0x73b6148ef880> │ │ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/routing.py", line 660, in __call__ await self.middleware_stack(scope, receive, send) │ │ │ │ └ .wrapped_app..sender at 0x73b6148ef880> │ │ │ └ > └ File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/routing.py", line 680, in app await route.handle(scope, receive, send) │ │ │ │ └ .wrapped_app..sender at 0x73b6148ef880> │ │ │ └ └ APIRoute(path='/summary_personal_memory', name='execute_endpoint', methods=['POST']) File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/routing.py", line 276, in handle await self.app(scope, receive, send) │ │ │ │ └ .wrapped_app..sender at 0x73b6148ef880> │ │ │ └ .app at 0x73b614d40ae0> └ APIRoute(path='/summary_personal_memory', name='execute_endpoint', methods=['POST']) File "/opt/reme/.venv/lib/python3.12/site-packages/fastapi/routing.py", line 134, in app await wrap_app_handling_exceptions(app, request)(scope, receive, send) │ │ │ │ │ └ .wrapped_app..sender at 0x73b6148ef880> │ │ │ │ └ │ └ .app..app at 0x73b6148ef920> └ File "/opt/reme/.venv/lib/python3.12/site-packages/starlette/_exception_handler.py", line 42, in wrapped_app await app(scope, receive, sender) │ │ │ └ .wrapped_app..sender at 0x73b6148efa60> │ │ └ .app..app at 0x73b6148ef920> File "/opt/reme/.venv/lib/python3.12/site-packages/fastapi/routing.py", line 120, in app response = await f(request) │ └ .app at 0x73b614d17d80> File "/opt/reme/.venv/lib/python3.12/site-packages/fastapi/routing.py", line 674, in app raw_response = await run_endpoint_function( └ File "/opt/reme/.venv/lib/python3.12/site-packages/fastapi/routing.py", line 328, in run_endpoint_function return await dependant.call(**values) │ │ └ {'request': SummaryPersonalMemoryModel(query='', messages=[], workspace_id='example_user_id', metadata={}, trajectories=[{'me... 
│ └ .execute_endpoint at 0x73b614e02e80> └ Dependant(path_params=[], query_params=[], header_params=[], cookie_params=[], body_params=[ModelField(field_info=Body(Pydant... File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/service/http_service.py", line 61, in execute_endpoint return await flow.async_call(**request.model_dump(exclude_none=True)) │ │ │ └ │ │ └ SummaryPersonalMemoryModel(query='', messages=[], workspace_id='example_user_id', metadata={}, trajectories=[{'messages': [{'... │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/flow/base_flow.py", line 255, in async_call result = await self._async_call(context=context) │ │ └ FlowContext({'query': '', 'messages': [PersonalMemory(workspace_id='example_user_id', memory_id='56dc81c4a3a945fb9aaf8cc671bf... │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/flow/base_flow.py", line 225, in _async_call await flow_op.async_call(context=context) │ │ └ FlowContext({'query': '', 'messages': [PersonalMemory(workspace_id='example_user_id', memory_id='56dc81c4a3a945fb9aaf8cc671bf... │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/op/base_async_op.py", line 185, in async_call result = await self.async_execute() │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/op/sequential_op.py", line 51, in async_execute result = await op.async_call(context=self.context) │ │ │ └ FlowContext({'query': '', 'messages': [PersonalMemory(workspace_id='example_user_id', memory_id='56dc81c4a3a945fb9aaf8cc671bf... 
│ │ └ │ └ > File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/op/base_async_op.py", line 185, in async_call result = await self.async_execute() │ └ File "/opt/reme/reme_ai/vector_store/update_vector_store_op.py", line 54, in async_execute await self.vector_store.async_insert(nodes=insert_nodes, workspace_id=workspace_id)  │ │ │ └ 'example_user_id'  │ │ └ [VectorNode(unique_id='49dc2f8253d34deaa2e57c185cc62ac2', workspace_id='example_user_id', content='Xiao Bai, 29, apples, Hami...  │ └   └  File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/vector_store/local_vector_store.py", line 546, in async_insert await super().async_insert(nodes, workspace_id, **kwargs) │ │ └ {} │ └ 'example_user_id' └ [VectorNode(unique_id='49dc2f8253d34deaa2e57c185cc62ac2', workspace_id='example_user_id', content='Xiao Bai, 29, apples, Hami... File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/vector_store/memory_vector_store.py", line 755, in async_insert nodes = await self.async_get_node_embeddings(nodes) │ │ └ [VectorNode(unique_id='49dc2f8253d34deaa2e57c185cc62ac2', workspace_id='example_user_id', content='Xiao Bai, 29, apples, Hami... │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/vector_store/base_vector_store.py", line 91, in async_get_node_embeddings return await self.embedding_model.async_get_node_embeddings(nodes) │ │ │ └ [VectorNode(unique_id='49dc2f8253d34deaa2e57c185cc62ac2', workspace_id='example_user_id', content='Xiao Bai, 29, apples, Hami... 
│ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/base_embedding_model.py", line 207, in async_get_node_embeddings batch_results = await asyncio.gather(*batch_tasks) │ │ └ [] │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/base_embedding_model.py", line 129, in async_get_embeddings raise e File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/base_embedding_model.py", line 123, in async_get_embeddings return await self._async_get_embeddings(input_text) │ │ └ ['Xiao Bai, 29, apples, Hami melons'] │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/flowllm/core/embedding_model/openai_compatible_embedding_model.py", line 124, in _async_get_embeddings completion = await self._async_client.embeddings.create( │ │ │ └ │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/resources/embeddings.py", line 259, in create return await self._post( │ └ > └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1884, in post return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) │ │ │ │ │ └ None │ │ │ │ └ False │ │ │ └ FinalRequestOptions(method='post', url='/embeddings', params={}, headers=NOT_GIVEN, max_retries=NOT_GIVEN, timeout=NOT_GIVEN,... │ │ └ │ └ File "/opt/reme/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1669, in request raise self._make_status_error_from_response(err.response) from None │ └ openai.BadRequestError: Error code: 400 - {'error': {'message': 'Model "bge-m3" does not support matryoshka representation, changing output dimensions will lead to poor results.', 'type': 'BadRequestError', 'param': '', 'code': 400}} ``` 可能的原因(待验证):这个 400 是由 embedding 推理后端(如 vLLM)返回的,而不是 ReMe 本身的报错——从报错信息看,FlowLLM 的 openai_compatible_embedding_model 调用 /embeddings 接口时在请求中携带了 dimensions 参数,而 bge-m3 不是 matryoshka(套娃表征)模型,不支持改变输出维度,后端因此直接拒绝请求。可尝试检查 ReMe/FlowLLM 的 embedding 配置,把维度设置为 bge-m3 的原生维度 1024,或设法让请求中不携带 dimensions 参数后再验证。