1
- from typing import List , Optional
1
+ from typing import Any , Dict , List , Optional
2
2
from uuid import UUID
3
3
4
4
import requests
5
5
import structlog
6
- from fastapi import APIRouter , Depends , HTTPException , Response
6
+ from fastapi import APIRouter , Depends , HTTPException , Query , Response
7
7
from fastapi .responses import StreamingResponse
8
8
from fastapi .routing import APIRoute
9
9
from pydantic import BaseModel , ValidationError
10
10
11
+ from codegate .config import API_DEFAULT_PAGE_SIZE , API_MAX_PAGE_SIZE
11
12
import codegate .muxing .models as mux_models
12
13
from codegate import __version__
13
14
from codegate .api import v1_models , v1_processing
@@ -378,7 +379,11 @@ async def hard_delete_workspace(workspace_name: str):
378
379
tags = ["Workspaces" ],
379
380
generate_unique_id_function = uniq_name ,
380
381
)
381
async def get_workspace_alerts(
    workspace_name: str,
    page: int = Query(1, ge=1),
    # BUG FIX: the kwarg was misspelled `get=1`; `ge=1` is the actual
    # lower-bound validator, so page_size could previously be 0/negative.
    page_size: int = Query(API_DEFAULT_PAGE_SIZE, ge=1, le=API_MAX_PAGE_SIZE),
) -> Dict[str, Any]:
    """Get paginated critical alerts for a workspace.

    Returns a dict containing the requested page of deduplicated alert
    conversations plus pagination metadata: ``page``, ``page_size``,
    ``total_alerts`` (raw, pre-dedup count from the DB), ``total_pages``,
    and ``alerts``.

    Raises:
        HTTPException: 404 if the workspace does not exist; 500 on any
            unexpected database or processing error.
    """
    try:
        ws = await wscrud.get_workspace_by_name(workspace_name)
    except crud.WorkspaceDoesNotExistError:
        # NOTE(review): this handler sits in lines hidden by the diff hunk;
        # restored from this file's standard workspace-lookup pattern — confirm.
        raise HTTPException(status_code=404, detail="Workspace does not exist")
    except Exception:
        logger.exception("Error while getting workspace")
        raise HTTPException(status_code=500, detail="Internal server error")

    # The DB is paginated over raw alert rows, but duplicates are removed
    # after fetching, so keep pulling batches until the requested page is
    # full. NOTE(review): the SQL offset indexes pre-dedup rows, so pages
    # can overlap/skip slightly when duplicates are unevenly distributed.
    try:
        total_alerts = 0
        fetched_alerts = []
        offset = (page - 1) * page_size
        batch_size = page_size * 2  # over-fetch to compensate for dedup losses

        while len(fetched_alerts) < page_size:
            # BUG FIX: fetch batch_size rows (the original fetched only
            # page_size rows while advancing offset by batch_size, silently
            # skipping half of the alerts).
            alerts_batch, total_alerts = await dbreader.get_alerts_by_workspace(
                ws.id, AlertSeverity.CRITICAL.value, batch_size, offset
            )
            if not alerts_batch:
                break

            dedup_alerts = await v1_processing.remove_duplicate_alerts(alerts_batch)
            fetched_alerts.extend(dedup_alerts)
            # Advance by the number of raw rows actually consumed so no row
            # is skipped and no row is read twice.
            offset += len(alerts_batch)

        final_alerts = fetched_alerts[:page_size]
        # Only fetch prompts/outputs for the alerts on this page.
        prompt_ids = list({alert.prompt_id for alert in final_alerts if alert.prompt_id})
        prompts_outputs = await dbreader.get_prompts_with_output(prompt_ids)
        alert_conversations = await v1_processing.parse_get_alert_conversation(
            final_alerts, prompts_outputs
        )
    except Exception:
        # BUG FIX: the pagination rewrite dropped the original 500 guard;
        # restore it so DB/processing failures surface as a clean HTTP 500.
        logger.exception("Error while getting alerts and messages")
        raise HTTPException(status_code=500, detail="Internal server error")

    return {
        "page": page,
        "page_size": page_size,
        "total_alerts": total_alerts,
        "total_pages": (total_alerts + page_size - 1) // page_size,
        "alerts": alert_conversations,
    }
398
425
399
426
400
427
@v1 .get (
0 commit comments