@@ -1409,29 +1409,31 @@ describe('context-pruner threshold behavior', () => {
14091409 return results
14101410 }
14111411
1412- test ( 'does not prune when exactly at max limit' , ( ) => {
1412+ test ( 'does not prune when under max limit minus fudge factor' , ( ) => {
14131413 const messages = [
14141414 createMessage ( 'user' , 'Hello' ) ,
14151415 createMessage ( 'assistant' , 'Hi' ) ,
14161416 ]
14171417
1418- // Set context to exactly max limit - should NOT prune
1419- const results = runHandleSteps ( messages , 200000 , 200000 )
1418+ // Set context to max limit minus fudge factor (1000) - should NOT prune
1419+ // contextTokenCount + 1000 <= maxContextLength => 199000 + 1000 <= 200000
1420+ const results = runHandleSteps ( messages , 199000 , 200000 )
14201421
14211422 // Should preserve original messages (not summarized)
14221423 expect ( results [ 0 ] . input . messages ) . toHaveLength ( 2 )
14231424 expect ( results [ 0 ] . input . messages [ 0 ] . role ) . toBe ( 'user' )
14241425 expect ( results [ 0 ] . input . messages [ 1 ] . role ) . toBe ( 'assistant' )
14251426 } )
14261427
1427- test ( 'prunes when just over max limit' , ( ) => {
1428+ test ( 'prunes when at max limit due to fudge factor' , ( ) => {
14281429 const messages = [
14291430 createMessage ( 'user' , 'Hello' ) ,
14301431 createMessage ( 'assistant' , 'Hi' ) ,
14311432 ]
14321433
1433- // Set context to just over max limit - should prune
1434- const results = runHandleSteps ( messages , 200001 , 200000 )
1434+ // Set context to exactly max limit - should prune due to 1000 token fudge factor
1435+ // contextTokenCount + 1000 > maxContextLength => 200000 + 1000 > 200000
1436+ const results = runHandleSteps ( messages , 200000 , 200000 )
14351437
14361438 // Should have summarized to single message
14371439 expect ( results [ 0 ] . input . messages ) . toHaveLength ( 1 )
0 commit comments