
Commit c1dd898

Merge pull request #277 from marcklingen/fix-langfuse-filter
fix: langfuse filter pipeline cost tracking
2 parents 0b99d06 + 5788998 commit c1dd898

File tree: 1 file changed, +24 -12 lines


examples/filters/langfuse_filter_pipeline.py

+24 -12
@@ -1,23 +1,28 @@
 """
 title: Langfuse Filter Pipeline
 author: open-webui
-date: 2024-05-30
-version: 1.3
+date: 2024-09-27
+version: 1.4
 license: MIT
 description: A filter pipeline that uses Langfuse.
 requirements: langfuse
 """
 
 from typing import List, Optional
-from schemas import OpenAIChatMessage
 import os
 import uuid
 
-from utils.pipelines.main import get_last_user_message, get_last_assistant_message
+from utils.pipelines.main import get_last_assistant_message
 from pydantic import BaseModel
 from langfuse import Langfuse
 from langfuse.api.resources.commons.errors.unauthorized_error import UnauthorizedError
 
+def get_last_assistant_message_obj(messages: List[dict]) -> dict:
+    for message in reversed(messages):
+        if message["role"] == "assistant":
+            return message
+    return {}
+
 
 class Pipeline:
     class Valves(BaseModel):
@@ -109,21 +114,28 @@ async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
 
     async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
         print(f"outlet:{__name__}")
+        print(f"Received body: {body}")
         if body["chat_id"] not in self.chat_generations:
             return body
 
         generation = self.chat_generations[body["chat_id"]]
         assistant_message = get_last_assistant_message(body["messages"])
 
-        # Extract usage information
-        info = assistant_message.get("info", {})
+
+        # Extract usage information for models that support it
         usage = None
-        if "prompt_tokens" in info and "completion_tokens" in info:
-            usage = {
-                "input": info["prompt_tokens"],
-                "output": info["completion_tokens"],
-                "unit": "TOKENS",
-            }
+        assistant_message_obj = get_last_assistant_message_obj(body["messages"])
+        if assistant_message_obj:
+            info = assistant_message_obj.get("info", {})
+            if isinstance(info, dict):
+                input_tokens = info.get("prompt_eval_count") or info.get("prompt_tokens")
+                output_tokens = info.get("eval_count") or info.get("completion_tokens")
+                if input_tokens is not None and output_tokens is not None:
+                    usage = {
+                        "input": input_tokens,
+                        "output": output_tokens,
+                        "unit": "TOKENS",
+                    }
 
         # Update generation
         generation.end(
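
The net effect of the change is easier to see outside the diff: instead of requiring OpenAI-style prompt_tokens/completion_tokens, the filter now walks back to the full last assistant message object and also accepts Ollama-style prompt_eval_count/eval_count, building the Langfuse usage payload only when both counts are present. Below is a minimal, self-contained sketch of that extraction logic; the sample body payload and its token values are illustrative assumptions, not part of the commit.

from typing import List

# Hypothetical outlet() payload; the "info" keys mirror Ollama-style usage fields.
body = {
    "chat_id": "chat-123",
    "messages": [
        {"role": "user", "content": "Hi"},
        {
            "role": "assistant",
            "content": "Hello!",
            "info": {"prompt_eval_count": 12, "eval_count": 34},
        },
    ],
}


def get_last_assistant_message_obj(messages: List[dict]) -> dict:
    # Return the whole message dict (not just its content) so "info" is reachable.
    for message in reversed(messages):
        if message["role"] == "assistant":
            return message
    return {}


usage = None
assistant_message_obj = get_last_assistant_message_obj(body["messages"])
if assistant_message_obj:
    info = assistant_message_obj.get("info", {})
    if isinstance(info, dict):
        # Prefer Ollama-style counters, fall back to OpenAI-style ones.
        input_tokens = info.get("prompt_eval_count") or info.get("prompt_tokens")
        output_tokens = info.get("eval_count") or info.get("completion_tokens")
        if input_tokens is not None and output_tokens is not None:
            usage = {"input": input_tokens, "output": output_tokens, "unit": "TOKENS"}

print(usage)  # -> {'input': 12, 'output': 34, 'unit': 'TOKENS'}

If neither pair of keys is present, usage stays None and generation.end(...) is called without token counts, so Langfuse has no token data to base cost tracking on for that generation.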
