Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: mm/khugepaged.c    Create Date: 2022-07-28 16:06:49
Last Modify: 2020-03-12 14:18:49    Copyright © Brick
home page Tree
Annotated kernel source browsing tool | Download | SCCT | Chinese

Name:collapse_huge_page

Proto:static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, int node, int referenced)

Type:void

Parameter:

Type    Parameter Name
struct mm_struct *mm
unsigned long address
struct page **hpage
int node
int referenced
956  isolated = 0 , result = 0
962  VM_BUG_ON(address & ~HPAGE_PMD_MASK)
965  gfp = Defrag for khugepaged will enter direct reclaim/compaction if necessary | __GFP_THISNODE
973  release the read lock
974  new_page = khugepaged_alloc_page(hpage, gfp, node)
975  If Not new_page Then
976  result = SCAN_ALLOC_HUGE_PAGE_FAIL
977  Go to out_nolock
980  If Value for the false possibility is greater at compile time(mem_cgroup_try_charge(new_page, mm, gfp, & memcg, true)) Then
981  result = SCAN_CGROUP_CHARGE_FAIL
982  Go to out_nolock
985  lock for reading
986  result = hugepage_vma_revalidate() — if mmap_sem was temporarily dropped, revalidate the vma before retaking mmap_sem; returns 0 on success, otherwise a non-zero value (scan code)
987  If result Then
988  mem_cgroup_cancel_charge(new_page, memcg, true)
989  release the read lock
990  Go to out_nolock
993  pmd = mm_find_pmd(mm, address)
994  If Not pmd Then
995  result = SCAN_PMD_NULL
996  mem_cgroup_cancel_charge(new_page, memcg, true)
997  release the read lock
998  Go to out_nolock
1006  If Not Bring missing pages in from swap, to complete THP collapse.* Only done if khugepaged_scan_pmd believes it is worthwhile.* Called and returns without pte mapped or spinlocks held,* but with mmap_sem held to protect against vma changes. Then
1007  mem_cgroup_cancel_charge(new_page, memcg, true)
1008  release the read lock
1009  Go to out_nolock
1012  release the read lock
1018  lock for writing
1019  result = SCAN_ANY_PROCESS
1020  If Not This has to be called after a get_task_mm()/mmget_not_zero()* followed by taking the mmap_sem for writing before modifying the* vmas or anything the coredump pretends not to change from under it Then Go to out
1022  result = hugepage_vma_revalidate() — if mmap_sem was temporarily dropped, revalidate the vma before retaking mmap_sem; returns 0 on success, otherwise a non-zero value (scan code)
1023  If result Then Go to out
1026  If mm_find_pmd(mm, address) != pmd Then Go to out
1029  anon_vma_lock_write(vma->anon_vma /* serialized by page_table_lock */)
1031  mmu_notifier_range_init( & range, MMU_NOTIFY_CLEAR, 0, NULL, mm, address, address + HPAGE_PMD_SIZE)
1033  mmu_notifier_invalidate_range_start( & range)
1035  pte = pte_offset_map(pmd, address)
1036  pte_ptl = pte_lockptr(mm, pmd)
1038  pmd_ptl = pmd_lock(mm, pmd)
1045  _pmd = pmdp_collapse_flush(vma, address, pmd)
1046  spin_unlock(pmd_ptl)
1047  mmu_notifier_invalidate_range_end( & range)
1049  spin_lock(pte_ptl)
1050  isolated = __collapse_huge_page_isolate(vma, address, pte)
1051  spin_unlock(pte_ptl)
1053  If Value for the false possibility is greater at compile time(!isolated) Then
1054  pte_unmap(pte)
1055  spin_lock(pmd_ptl)
1056  BUG_ON(!pmd_none( * pmd))
1062  pmd_populate(mm, pmd, pmd_pgtable(_pmd))
1063  spin_unlock(pmd_ptl)
1064  anon_vma_unlock_write(vma->anon_vma /* serialized by page_table_lock */)
1065  result = SCAN_FAIL
1066  Go to out
1073  anon_vma_unlock_write(vma->anon_vma /* serialized by page_table_lock */)
1075  __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl)
1076  pte_unmap(pte)
1077  __SetPageUptodate(new_page)
1078  pgtable = pmd_pgtable(_pmd)
1080  _pmd = mk_huge_pmd(new_page, Access permissions of this VMA. )
1081  _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma)
1088  smp_wmb()
1090  spin_lock(pmd_ptl)
1091  BUG_ON(!pmd_none( * pmd))
1092  page_add_new_anon_rmap(new_page, vma, address, true) — add pte mapping to a new anonymous page; @compound = true charges the page as a compound page
1093  mem_cgroup_commit_charge(new_page, memcg, false, true)
1094  count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1)
1095  lru_cache_add_active_or_unevictable(new_page, vma) — place the page on the active or unevictable LRU list, depending on its evictability
1096  pgtable_trans_huge_deposit(mm, pmd, pgtable)
1097  set_pmd_at(mm, address, pmd, _pmd)
1098  update_mmu_cache_pmd(vma, address, pmd)
1099  spin_unlock(pmd_ptl)
1101  * hpage = NULL
1103  khugepaged_pages_collapsed++
1104  result = SCAN_SUCCEED
1105  out_up_write :
1106  release the write lock
1107  out_nolock :
1108  trace_mm_collapse_huge_page(mm, isolated, result)
1109  Return
1110  out :
1111  mem_cgroup_cancel_charge(new_page, memcg, true)
1112  Go to out_up_write
Caller
NameDescribe
khugepaged_scan_pmd