Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code:block\blk-iocost.c Create Date:2022-07-28 17:48:33
Last Modify:2020-03-12 14:18:49 Copyright©Brick
home page Tree
Annotation kernel can get tool activityDownload SCCTChinese

Name:ioc_timer_fn

Proto:static void ioc_timer_fn(struct timer_list *timer)

Type:void

Parameter:

TypeParameterName
struct timer_list *timer
1343  ioc = container_of - cast a member of a structure out to the containing structure*@ptr: the pointer to the member.*@type: the type of the container struct this is embedded in.*@member: the name of the member within the struct.(timer, structioc, timer)
1346  nr_surpluses = 0 , nr_shortages = 0 , nr_lagging = 0
1347  ppm_rthr = MILLION - qos[QOS_RPPM]
1348  ppm_wthr = MILLION - qos[QOS_WPPM]
1354  ioc_lat_stat(ioc, missed_ppm, & rq_wait_pct)
1357  spin_lock_irq( & lock)
1359  Take a snapshot of the current [v]time and vrate
1361  period_vtime = vnow - vtime starttime
1362  If WARN_ON_ONCE(!period_vtime) Then
1363  spin_unlock_irq( & lock)
1364  Return
1374  If Not waitqueue_active -- locklessly test for waiters on the queue*@wq_head: the waitqueue to test for waiters* returns true if the wait list is not empty* NOTE: this function is lockless and requires care, incorrect usage _will_ && Not atomic64_read( & abs_vdebt) && Not was iocg idle this period? Then Continue
1378  spin_lock( & lock)
1383  iocg_kick_waitq(iocg, & now)
1384  iocg_kick_delay(iocg, & now, 0)
1385  Else if was iocg idle this period? Then
1387  last_inuse = inuse
1392  spin_unlock( & lock)
1394  commit_active_weights(ioc)
1405  vdone = atomic64_read( & done_vtime)
1406  vtime = atomic64_read( & * `vtime` is this iocg's vtime cursor which progresses as IOs are * issued. If lagging behind device vtime, the delta represents * the currently available IO budget. If running ahead, the * overage. * `vtime_done` is the same but progressed on completion )
1407  current_hweight(iocg, & hw_active, & hw_inuse)
1415  If ( ppm_rthr != MILLION || ppm_wthr != MILLION ) && Not atomic_read( & use_delay) && Same as above, but does so with platform independent 64bit types.* These must be used when utilizing jiffies_64 (i.e. return value of* get_jiffies_64() (vtime, vdone) && Same as above, but does so with platform independent 64bit types.* These must be used when utilizing jiffies_64 (i.e. return value of* get_jiffies_64() (vtime, vnow - MAX_LAGGING_PERIODS * period_vtime) && time_before64(vdone, vnow - period_vtime) Then nr_lagging++
1423  If waitqueue_active -- locklessly test for waiters on the queue*@wq_head: the waitqueue to test for waiters* returns true if the wait list is not empty* NOTE: this function is lockless and requires care, incorrect usage _will_ Then vusage = vnow - last_vtime
1425  Else if time_before64(last_vtime, vtime) Then vusage = vtime - last_vtime
1427  Else vusage = 0
1430  last_vtime += vusage
1436  vusage = max - return maximum of two values of the same or compatible types*@x: first value*@y: second value(vusage, vtime - vdone)
1439  If vusage Then
1440  usage = DIV64_U64_ROUND_UP(vusage * hw_inuse, period_vtime)
1442  usage is recorded as fractions of HWEIGHT_WHOLE = ( usage is recorded as fractions of HWEIGHT_WHOLE + 1) % NR_USAGE_SLOTS
1443  usages[ usage is recorded as fractions of HWEIGHT_WHOLE ] = usage
1444  Else
1445  usage = 0
1449  vmargin = margin_us * vrate
1450  vmin = vnow - vmargin
1452  has_surplus = false
1454  If Not waitqueue_active -- locklessly test for waiters on the queue*@wq_head: the waitqueue to test for waiters* returns true if the wait list is not empty* NOTE: this function is lockless and requires care, incorrect usage _will_ && time_before64(vtime, vmin) Then
1456  delta = vmin - vtime
1459  atomic64_add(delta, & * `vtime` is this iocg's vtime cursor which progresses as IOs are * issued. If lagging behind device vtime, the delta represents * the currently available IO budget. If running ahead, the * overage. * `vtime_done` is the same but progressed on completion )
1460  atomic64_add(delta, & done_vtime)
1461  last_vtime += delta
1464  has_surplus = true
1465  nr_surpluses++
1467  Else if hw_inuse < hw_active Then
1472  new_hwi = hw_active
1473  Else
1479  new_inuse = div64_u64 - unsigned 64bit divide with 64bit divisor*@dividend: 64bit dividend*@divisor: 64bit divisor* This implementation is a modified version of the algorithm proposed* by the book 'Hacker's Delight'. The original source and full proof
1481  new_inuse = clamp_t - return a value clamped to a given range using a given type*@type: the type of variable to use*@val: current value*@lo: minimum allowable value*@hi: maximum allowable value* This macro does no typechecking and uses temporary variables of type(u32, new_inuse, 1, active)
1483  If new_inuse > inuse Then
1490  Else
1492  nr_shortages++
1496  If Not nr_shortages || Not nr_surpluses Then Go to skip_surplus_transfers
1502  nr_valid = 0
1504  If Not has_surplus Then Continue
1508  When i < NR_USAGE_SLOTS cycle
1509  If usages[i] Then
1511  nr_valid++
1514  If nr_valid < MIN_VALID_USAGES Then Continue
1517  current_hweight(iocg, & hw_active, & hw_inuse)
1518  new_hwi = returns usage with margin added if surplus is large enough
1519  If Not new_hwi Then Continue
1522  new_inuse = DIV64_U64_ROUND_UP((u64)inuse * new_hwi, hw_inuse)
1524  If new_inuse < inuse Then
1525  TRACE_IOCG_PATH(inuse_giveaway, iocg, & now, inuse, new_inuse, hw_inuse, new_hwi)
1528  Update @iocg's `active` and `inuse` to @active and @inuse, update level* weight sums and propagate upwards accordingly.
1531  skip_surplus_transfers :
1532  commit_active_weights(ioc)
1540  prev_busy_level = saturation history
1541  If rq_wait_pct > RQ_WAIT_BUSY_PCT || missed_ppm[READ] > ppm_rthr || missed_ppm[WRITE] > ppm_wthr Then
1544  saturation history = max - return maximum of two values of the same or compatible types*@x: first value*@y: second value(saturation history , 0)
1545  saturation history ++
1546  Else if rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 && missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 && missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100 Then
1550  If nr_shortages && Not nr_lagging Then
1553  If Not nr_surpluses Then saturation history --
1556  Else
1557  saturation history = 0
1560  saturation history = clamp - return a value clamped to a given range with strict typechecking*@val: current value*@lo: lowest allowable value*@hi: highest allowable value* This macro does strict typechecking of @lo/@hi to make sure they are of the* same type as @val(saturation history , - 1000, 1000)
1562  If saturation history > 0 || ( saturation history < 0 && Not nr_lagging ) Then
1563  vrate = atomic64_read( & vtime_rate)
1564  vrate_min = vrate_min , vrate_max = vrate_max
1567  If rq_wait_pct > RQ_WAIT_BUSY_PCT Then vrate_min = VRATE_MIN
1575  If vrate < vrate_min Then
1579  Else if vrate > vrate_max Then
1583  Else
1588  If saturation history > 0 Then adj_pct = 100 - adj_pct
1590  Else adj_pct = 100 + adj_pct
1597  trace_iocost_ioc_vrate_adj(ioc, vrate, & missed_ppm, rq_wait_pct, nr_lagging, nr_shortages, nr_surpluses)
1601  atomic64_set( & vtime_rate, vrate)
1602  inuse_margin_vtime = DIV64_U64_ROUND_UP(period_us * vrate * INUSE_MARGIN_PCT, 100)
1604  Else if saturation history != prev_busy_level || nr_lagging Then
1605  trace_iocost_ioc_vrate_adj(ioc, atomic64_read( & vtime_rate), & missed_ppm, rq_wait_pct, nr_lagging, nr_shortages, nr_surpluses)
1610  ioc_refresh_params(ioc, false)
1616  atomic64_inc( & cur_period, inc'd each period )
1618  If running != IOC_STOP Then
1620  ioc_start_period(ioc, & now)
1621  Else
1623  running = IOC_IDLE
1627  spin_unlock_irq( & lock)