@@ -1705,6 +1705,9 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 	kpb->draining_task_data.next_copy_time = 0;
 	kpb->draining_task_data.dev = dev;
 	kpb->draining_task_data.sync_mode_on = kpb->sync_draining_mode;
+	kpb->draining_task_data.task_iteration = 0;
+	kpb->draining_task_data.prev_adjustment_time = 0;
+	kpb->draining_task_data.prev_adjustment_drained = 0;
 
 	/* save current sink copy type */
 	comp_get_attribute(comp_buffer_get_sink_component(kpb->host_sink),
@@ -1732,6 +1735,52 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 	}
 }
 
+static void adjust_drain_interval(struct comp_data *kpb, struct draining_data *dd)
+{
+	uint64_t now; /* timestamp in wall-clock cycles */
+
+	/* readjust drain_interval every 32 task iterations */
+	if (dd->task_iteration++ % 32)
+		return;
+
+	now = sof_cycle_get_64();
+
+	if (dd->prev_adjustment_time) {
+		size_t drained;
+		size_t elapsed;
+		size_t actual_pace, optimal_pace;
+		size_t pipeline_period;
+
+		drained = dd->drained - dd->prev_adjustment_drained;
+		elapsed = now - dd->prev_adjustment_time;
+		assert(elapsed);
+		/* average drained bytes per second */
+		actual_pace = (size_t)k_ms_to_cyc_ceil64(1000) / elapsed * drained;
+
+		pipeline_period = KPB_SAMPLES_PER_MS *
+			(KPB_SAMPLE_CONTAINER_SIZE(dd->sample_width) / 8) * kpb->config.channels;
+		/* desired draining pace in bytes per second */
+		optimal_pace = pipeline_period * KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE * 1000;
+
+		/* just in case, to prevent div by 0 if draining is stuck (e.g. by the host) */
+		if (actual_pace) {
+			if (actual_pace < optimal_pace) {
+				dd->drain_interval /= optimal_pace / actual_pace;
+				dd->drain_interval -= dd->drain_interval / 8;
+			} else if (actual_pace > optimal_pace) {
+				dd->drain_interval *= actual_pace / optimal_pace;
+				dd->drain_interval += dd->drain_interval / 8;
+			}
+			/* the above algorithm would get stuck if drain_interval fell below 8 */
+			if (dd->drain_interval < 8)
+				dd->drain_interval = 8;
+		}
+	}
+
+	dd->prev_adjustment_time = now;
+	dd->prev_adjustment_drained = dd->drained;
+}
+
 /**
  * \brief Draining task.
  *
@@ -1750,7 +1799,6 @@ static enum task_state kpb_draining_task(void *arg)
 	size_t size_to_copy;
 	uint64_t draining_time_end;
 	uint64_t draining_time_ms;
-	uint64_t drain_interval = draining_data->drain_interval;
 	size_t period_bytes_limit = draining_data->pb_limit;
 	size_t *rt_stream_update = &draining_data->buffered_while_draining;
 	struct comp_data *kpb = comp_get_drvdata(draining_data->dev);
@@ -1777,6 +1825,8 @@ static enum task_state kpb_draining_task(void *arg)
 		goto out;
 	}
 
+	adjust_drain_interval(kpb, draining_data);
+
 	if (draining_data->drain_req > 0) {
 		/* Are we ready to drain further, or does the host still need
 		 * some time to read the data already provided?
@@ -1829,7 +1879,7 @@ static enum task_state kpb_draining_task(void *arg)
 
 	if (sync_mode_on && draining_data->period_bytes >= period_bytes_limit) {
 		draining_data->next_copy_time = draining_data->period_copy_start +
-			drain_interval;
+			draining_data->drain_interval;
 		draining_data->period_copy_start = 0;
 	} else {
 		draining_data->next_copy_time = 0;
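
The adjustment step in adjust_drain_interval() is plain integer arithmetic, so it can be exercised in isolation. The sketch below is not SOF code, only a compilable illustration under assumed values: CYC_PER_SEC stands in for k_ms_to_cyc_ceil64(1000) on a hypothetical 38.4 MHz wall clock, and OPTIMAL_PACE for pipeline_period * KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE * 1000 at 48 kHz, 32-bit containers, 2 channels, and 2 pipeline periods drained at once; the real function derives all of these from kpb->config and the platform timer.

/*
 * Standalone sketch of the drain-interval adjustment, with illustrative
 * constants (assumptions, not values from the SOF tree).
 */
#include <stddef.h>
#include <stdio.h>

#define CYC_PER_SEC	38400000u		/* assumed 38.4 MHz wall clock */
#define OPTIMAL_PACE	(48 * 4 * 2 * 2 * 1000)	/* 768000 bytes/s target */

static size_t adjust(size_t drain_interval, size_t drained, size_t elapsed)
{
	/* bytes drained per second; divide first to keep the product small */
	size_t actual_pace = CYC_PER_SEC / elapsed * drained;

	if (!actual_pace)	/* draining stuck: leave the interval alone */
		return drain_interval;

	if (actual_pace < OPTIMAL_PACE) {
		/* too slow: shrink the interval by the pace ratio, plus 1/8 */
		drain_interval /= OPTIMAL_PACE / actual_pace;
		drain_interval -= drain_interval / 8;
	} else if (actual_pace > OPTIMAL_PACE) {
		/* too fast: grow the interval by the pace ratio, plus 1/8 */
		drain_interval *= actual_pace / OPTIMAL_PACE;
		drain_interval += drain_interval / 8;
	}

	return drain_interval < 8 ? 8 : drain_interval;	/* clamp, see above */
}

int main(void)
{
	/* 384000 bytes in one second is half the target pace, so a
	 * 1000-cycle interval shrinks to 1000 / 2 - 62 = 438
	 */
	printf("%zu\n", adjust(1000, 384000, CYC_PER_SEC));
	return 0;
}

Two details worth noting. First, actual_pace divides the cycles-per-second constant by the elapsed cycles before multiplying by the byte count, which keeps the intermediate product small at the cost of some precision. Second, once the measured pace is within 2x of the target, the integer ratio truncates to 1 and the divide/multiply becomes a no-op, so the drain_interval / 8 terms provide the fine adjustment; the clamp at 8 keeps that 1/8 step from rounding to zero, which is exactly the stall the original comment warns about.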