Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: mm/madvise.c  Create Date: 2022-07-28 15:12:13
Last Modify: 2020-03-12 14:18:49  Copyright © Brick

Name:madvise_cold_or_pageout_pte_range

Proto:static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk)

Type:int

Parameter:

Type              Name
pmd_t *           pmd
unsigned long     addr
unsigned long     end
struct mm_walk *  walk
304  private = walk->private
305  tlb = private->tlb
306  pageout = private->pageout
307  mm = tlb->mm
308  vma = walk->vma
311  struct page * page = NULL
312  LIST_HEAD(page_list)
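Lines 304-312 unpack the state that the page walker hands in through walk->private. A minimal sketch of that prologue, assuming the v5.5 layout of struct madvise_walk_private (an mmu_gather pointer plus a pageout flag); treat the exact layout as an assumption rather than a verbatim listing:

    /* Assumed layout of the private walk state (mm/madvise.c). */
    struct madvise_walk_private {
            struct mmu_gather *tlb;
            bool pageout;                   /* true for MADV_PAGEOUT, false for MADV_COLD */
    };

    /* Prologue: recover the walk state and prepare a list for pages to reclaim. */
    struct madvise_walk_private *private = walk->private;
    struct mmu_gather *tlb = private->tlb;
    bool pageout = private->pageout;
    struct mm_struct *mm = tlb->mm;
    struct vm_area_struct *vma = walk->vma;
    struct page *page = NULL;
    LIST_HEAD(page_list);                   /* pages queued here are fed to reclaim_pages() */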
314  If fatal_signal_pending(current process) Then Return -EINTR
318  If pmd_trans_huge( * pmd) Then
320  next = pmd_addr_end(addr, end)
322  tlb_change_page_size(tlb, HPAGE_PMD_SIZE)
323  ptl = pmd_trans_huge_lock(pmd, vma) (mmap_sem must be held on entry)
324  If Not ptl Then Return 0
327  orig_pmd = *pmd
328  If is_huge_zero_pmd(orig_pmd) Then Go to huge_unlock
331  If Not pmd_present(orig_pmd) (e.g. a pmd migration entry) Then
334  Go to huge_unlock
337  page = pmd_page(orig_pmd)
338  If next - addr != HPAGE_PMD_SIZE Then
341  If page_mapcount(page) != 1 Then Go to huge_unlock
344  get_page(page)
345  spin_unlock(ptl)
349  put_page(page)
350  If Not err (the result of split_huge_page(page)) Then Go to regular_page
352  Return 0
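Lines 338-352 handle a huge PMD that is only partially covered by [addr, end): the THP is split and, if the split succeeds, control jumps to the regular-page path. A hedged reconstruction of that branch; the lock_page()/split_huge_page()/unlock_page() sequence between lines 345 and 349 is inferred from the err test at line 350:

    if (next - addr != HPAGE_PMD_SIZE) {    /* the range covers only part of the THP */
            int err;

            if (page_mapcount(page) != 1)   /* split only if we are the sole mapper */
                    goto huge_unlock;

            get_page(page);
            spin_unlock(ptl);               /* cannot split while holding the pmd lock */
            lock_page(page);
            err = split_huge_page(page);    /* break the THP into base pages */
            unlock_page(page);
            put_page(page);
            if (!err)
                    goto regular_page;      /* redo the range pte by pte */
            return 0;
    }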
355  If pmd_young(orig_pmd) Then clear the pmd's accessed bit (pmdp_invalidate, pmd_mkold, set_pmd_at) and queue its TLB entry for flushing
363  ClearPageReferenced(page)
364  test_and_clear_page_young(page)
365  If pageout Then isolate the page from the LRU and add it to page_list (unevictable pages are put straight back)
372  Else deactivate_page(page): move the page to the inactive list to accelerate its reclaim
374  huge_unlock:
375  spin_unlock(ptl)
376  If pageout Then reclaim_pages(&page_list)
378  Return 0
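Lines 363-378 clear the huge page's referenced/young state and then either queue it for immediate reclaim (MADV_PAGEOUT) or push it toward the inactive list (MADV_COLD). A sketch of that decision, assuming the usual isolate_lru_page()/putback_lru_page() idiom for the pageout case:

    ClearPageReferenced(page);
    test_and_clear_page_young(page);
    if (pageout) {
            /* MADV_PAGEOUT: pull the page off the LRU and reclaim it below */
            if (!isolate_lru_page(page)) {
                    if (PageUnevictable(page))
                            putback_lru_page(page);   /* cannot reclaim, give it back */
                    else
                            list_add(&page->lru, &page_list);
            }
    } else {
            /* MADV_COLD: just demote the page toward the inactive list */
            deactivate_page(page);
    }
    huge_unlock:
            spin_unlock(ptl);
            if (pageout)
                    reclaim_pages(&page_list);
            return 0;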
381  If pmd_trans_unstable(pmd) Then Return 0 (a noop if Transparent Hugepage Support is not built into the kernel)
383  regular_page:
385  tlb_change_page_size(tlb, PAGE_SIZE)
386  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl)
387  flush_tlb_batched_pending(mm)
388  arch_enter_lazy_mmu_mode() (a facility to provide lazy MMU batching)
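Lines 385-389 set up the per-pte pass: switch the TLB batch to base-page size, map and lock the page table, and enter lazy MMU mode before iterating. A sketch of that skeleton with the loop body elided:

    tlb_change_page_size(tlb, PAGE_SIZE);
    orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    flush_tlb_batched_pending(mm);
    arch_enter_lazy_mmu_mode();
    for (; addr < end; pte++, addr += PAGE_SIZE) {
            ptent = *pte;
            /* ... per-page handling, lines 392-455 ... */
    }
    arch_leave_lazy_mmu_mode();
    pte_unmap_unlock(orig_pte, ptl);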
389  Loop while addr < end, advancing pte and addr by one page each iteration
390  ptent = *pte
392  If pte_none(ptent) Then Continue
395  If Not pte_present(ptent) Then Continue
398  page = vm_normal_page(vma, addr, ptent) (gets the struct page associated with a pte; "special" mappings have no struct page and are skipped)
399  If Not page Then Continue
406  If PageTransCompound(page) Then (splitting a THP is expensive, so split it only if we are the sole owner)
407  If page_mapcount(page) != 1 Then Break
409  get_page(page)
410  If Not trylock_page(page) Then
411  put_page(page)
412  Break
414  pte_unmap_unlock(orig_pte, ptl)
415  If split_huge_page(page) Then unlock and put the page, retake the pte lock, and Break
421  unlock_page(page)
422  put_page(page)
423  pte = pte_offset_map_lock(mm, pmd, addr, &ptl)
424  pte--
425  addr -= PAGE_SIZE
426  Continue
429  VM_BUG_ON_PAGE(PageTransCompound(page), page) (PageTransCompound is also true for hugetlbfs pages, which cannot be involved here)
431  If pte_young(ptent) Then
434  ptent = pte_mkold(ptent)
435  set_pte_at(mm, addr, pte, ptent)
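Lines 431-435 drop the pte's accessed bit so the page no longer counts as recently used. A sketch of the full if-block, assuming the ptep_get_and_clear_full()/tlb_remove_tlb_entry() idiom; the final TLB bookkeeping call is inferred from the surrounding pattern rather than shown in the walkthrough above:

    if (pte_young(ptent)) {
            /* atomically clear the pte, drop the accessed bit, then write it back */
            ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
            ptent = pte_mkold(ptent);
            set_pte_at(mm, addr, pte, ptent);
            /* queue the stale TLB entry for a batched flush */
            tlb_remove_tlb_entry(tlb, pte, addr);
    }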
445  ClearPageReferenced(page)
446  test_and_clear_page_young(page)
447  If pageout Then isolate the page from the LRU and add it to page_list (unevictable pages are put straight back)
454  Else deactivate_page(page): move the page to the inactive list to accelerate its reclaim
458  arch_leave_lazy_mmu_mode()
459  pte_unmap_unlock(orig_pte, ptl)
460  If pageout Then reclaim_pages(&page_list)
462  cond_resched()
464  Return 0
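For context, this function runs as the pmd_entry callback of a page-table walk started by the MADV_COLD/MADV_PAGEOUT handlers in mm/madvise.c. A minimal sketch of how a caller wires it up, assuming the v5.5 mm_walk_ops/walk_page_range() interface; the names cold_walk_ops and madvise_cold_page_range follow the kernel's convention but should be checked against the tree:

    static const struct mm_walk_ops cold_walk_ops = {
            .pmd_entry = madvise_cold_or_pageout_pte_range,
    };

    static void madvise_cold_page_range(struct mmu_gather *tlb,
                                        struct vm_area_struct *vma,
                                        unsigned long addr, unsigned long end)
    {
            struct madvise_walk_private walk_private = {
                    .pageout = false,       /* MADV_COLD: age pages, do not reclaim */
                    .tlb = tlb,
            };

            tlb_start_vma(tlb, vma);
            walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
            tlb_end_vma(tlb, vma);
    }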