/*
 * mm/hugetlb.c
 *
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

int hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;

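/*
 * Drop spool->lock and, when the last reference is gone and no pages
 * remain in use, free the subpool itself.  Must be called with
 * spool->lock held; the lock is always released here.
 */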
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
        bool free = (spool->count == 0) && (spool->used_hpages == 0);

        spin_unlock(&spool->lock);

        /* If no pages are used, and no other handles to the subpool
         * remain, free the subpool. */
        if (free)
                kfree(spool);
}

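/*
 * Allocate a new subpool that limits a hugetlbfs mount to nr_blocks
 * huge pages.  The caller holds the initial reference.
 */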
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
        struct hugepage_subpool *spool;

        spool = kmalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
                return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;
        spool->max_hpages = nr_blocks;
        spool->used_hpages = 0;

        return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
        spin_lock(&spool->lock);
        BUG_ON(!spool->count);
        spool->count--;
        unlock_or_release_subpool(spool);
}

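/*
 * Charge delta huge pages against the subpool's limit.  Returns 0 on
 * success, or -ENOMEM if the charge would push used_hpages past
 * max_hpages.  A NULL spool means no subpool accounting applies.
 */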
static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                      long delta)
{
        int ret = 0;

        if (!spool)
                return 0;

        spin_lock(&spool->lock);
        if ((spool->used_hpages + delta) <= spool->max_hpages) {
                spool->used_hpages += delta;
        } else {
                ret = -ENOMEM;
        }
        spin_unlock(&spool->lock);

        return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        if (!spool)
                return;

        spin_lock(&spool->lock);
        spool->used_hpages -= delta;
        /* If hugetlbfs_put_super couldn't free spool due to
         * an outstanding quota reference, free it now. */
        unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
        return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
        return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and
 * protected by the resv_map's lock.
 */
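/*
 * Illustrative sketch (not part of this file): reservations are made
 * in two phases.  region_chg() computes the charge for a range and
 * pre-allocates any descriptor a later commit may need; the huge page
 * is then allocated, and region_add() commits the range.  Roughly:
 *
 *	chg = region_chg(resv, idx, idx + 1);	(may fail with -ENOMEM)
 *	if (chg < 0)
 *		return chg;
 *	... allocate and instantiate the huge page ...
 *	region_add(resv, idx, idx + 1);		(cannot fail)
 *
 * This is the pattern used by vma_needs_reservation() and
 * vma_commit_reservation() below.
 */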
struct file_region {
        struct list_head link;
        long from;
        long to;
};

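/*
 * Add the range [f, t) to the reservation map, merging it with any
 * existing regions it overlaps.  The region descriptor it needs is
 * guaranteed to exist because a matching region_chg() call has already
 * prepared it, so this cannot fail.
 */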
static long region_add(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg, *trg;

        spin_lock(&resv->lock);
        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher, extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        spin_unlock(&resv->lock);
        return 0;
}

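/*
 * Return the charge (in huge pages) needed to reserve the range [f, t),
 * over and above any existing reservations, and pre-allocate the region
 * descriptor a later region_add() of the same range may need.  May drop
 * and retake the lock to allocate; returns -ENOMEM on allocation
 * failure.
 */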
static long region_chg(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg = NULL;
        long chg = 0;

retry:
        spin_lock(&resv->lock);
        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle: allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                if (!nrg) {
                        spin_unlock(&resv->lock);
                        nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                        if (!nrg)
                                return -ENOMEM;

                        nrg->from = f;
                        nrg->to   = f;
                        INIT_LIST_HEAD(&nrg->link);
                        goto retry;
                }

                list_add(&nrg->link, rg->link.prev);
                chg = t - f;
                goto out_nrg;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        goto out;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }

out:
        spin_unlock(&resv->lock);
        /* We already know we raced and no longer need the new region */
        kfree(nrg);
        return chg;
out_nrg:
        spin_unlock(&resv->lock);
        return chg;
}

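/*
 * Truncate the reservation map at offset 'end', trimming any region
 * that straddles it and dropping everything above.  Returns the number
 * of huge pages of reservation released.
 */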
static long region_truncate(struct resv_map *resv, long end)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *trg;
        long chg = 0;

        spin_lock(&resv->lock);
        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                goto out;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }

out:
        spin_unlock(&resv->lock);
        return chg;
}

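/*
 * Count the number of reserved huge pages that overlap the range
 * [f, t).
 */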
static long region_count(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg;
        long chg = 0;

        spin_lock(&resv->lock);
        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                long seg_from;
                long seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }
        spin_unlock(&resv->lock);

        return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address)
{
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        struct hstate *hstate;

        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        hstate = hstate_vma(vma);

        return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                                                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        if (!resv_map)
                return NULL;

        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);

        return resv_map;
}

void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

        /* Clear out any active regions before we release the map. */
        region_truncate(resv_map, 0);
        kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
        return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (vma->vm_flags & VM_MAYSHARE) {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct inode *inode = mapping->host;

                return inode_resv_map(inode);

        } else {
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        }
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma, long chg)
{
        if (vma->vm_flags & VM_NORESERVE) {
                /*
                 * This address is already reserved by another process
                 * (chg == 0), so we should decrement the reserved count.
                 * Without decrementing, the reserve count remains after
                 * releasing the inode, because this allocated page will go
                 * into the page cache and is regarded as coming from the
                 * reserved pool in the release step.  Currently, we don't
                 * have any other solution to deal with this situation
                 * properly, so add a work-around here.
                 */
                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
                        return 1;
                else
                        return 0;
        }

        /* Shared mappings always use reserves */
        if (vma->vm_flags & VM_MAYSHARE)
                return 1;

        /*
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return 1;

        return 0;
}

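/*
 * Move a huge page onto its node's free list and update the free
 * counts.  Called with hugetlb_lock held.
 */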
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
                if (!is_migrate_isolate_page(page))
                        break;
        /*
         * If no non-isolated free hugepage is found on the list,
         * the allocation fails.
         */
        if (&h->hugepage_freelists[nid] == &page->lru)
                return NULL;
        list_move(&page->lru, &h->hugepage_activelist);
        set_page_refcounted(page);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
        return page;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepages_treat_as_movable || hugepage_migration_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

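/*
 * Dequeue a free huge page for a fault at 'address' in 'vma',
 * respecting the task's mempolicy and cpuset constraints and consuming
 * a reserve when the VMA is entitled to one.  Called with hugetlb_lock
 * held.
 */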
static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
                                long chg)
{
        struct page *page = NULL;
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
        unsigned int cpuset_mems_cookie;

        /*
         * A child process with MAP_PRIVATE mappings created by its parent
         * has no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed.
         */
        if (!vma_has_reserves(vma, chg) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask(h), &mpol, &nodemask);

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
                if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
                        page = dequeue_huge_page_node(h, zone_to_nid(zone));
                        if (page) {
                                if (avoid_reserve)
                                        break;
                                if (!vma_has_reserves(vma, chg))
                                        break;

                                SetPagePrivate(page);
                                h->resv_huge_pages--;
                                break;
                        }
                }
        }

        mpol_cond_put(mpol);
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return page;

err:
        return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        nid = next_node(nid, *nodes_allowed);
        if (nid == MAX_NUMNODES)
                nid = first_node(*nodes_allowed);
        VM_BUG_ON(nid >= MAX_NUMNODES);

        return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        if (!node_isset(nid, *nodes_allowed))
                nid = next_node_allowed(nid, nodes_allowed);
        return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
                                        nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

        return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

        return nid;
}

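/*
 * Walk up to nodes_weight(*mask) candidate nodes round-robin, starting
 * from the hstate's saved "next" node; 'node' names the candidate on
 * each pass.
 */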
#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
                nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)

#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
static void destroy_compound_gigantic_page(struct page *page,
                                        unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __ClearPageTail(p);
                set_page_refcounted(p);
                p->first_page = NULL;
        }

        set_compound_order(page, 0);
        __ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned order)
{
        free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
                                unsigned long nr_pages)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
}

static bool pfn_range_valid_gigantic(unsigned long start_pfn,
                                unsigned long nr_pages)
{
        unsigned long i, end_pfn = start_pfn + nr_pages;
        struct page *page;

        for (i = start_pfn; i < end_pfn; i++) {
                if (!pfn_valid(i))
                        return false;

                page = pfn_to_page(i);

                if (PageReserved(page))
                        return false;

                if (page_count(page) > 0)
                        return false;

                if (PageHuge(page))
                        return false;
        }

        return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
                        unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long last_pfn = start_pfn + nr_pages - 1;
        return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(int nid, unsigned order)
{
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
        struct zone *z;

        z = NODE_DATA(nid)->node_zones;
        for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
                spin_lock_irqsave(&z->lock, flags);

                pfn = ALIGN(z->zone_start_pfn, nr_pages);
                while (zone_spans_last_pfn(z, pfn, nr_pages)) {
                        if (pfn_range_valid_gigantic(pfn, nr_pages)) {
                                /*
                                 * We release the zone lock here because
                                 * alloc_contig_range() will also lock the zone
                                 * at some point. If there's an allocation
                                 * spinning on this lock, it may win the race
                                 * and cause alloc_contig_range() to fail...
                                 */
                                spin_unlock_irqrestore(&z->lock, flags);
                                ret = __alloc_gigantic_page(pfn, nr_pages);
                                if (!ret)
                                        return pfn_to_page(pfn);
                                spin_lock_irqsave(&z->lock, flags);
                        }
                        pfn += nr_pages;
                }

                spin_unlock_irqrestore(&z->lock, flags);
        }

        return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned long order);

static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
        struct page *page;

        page = alloc_gigantic_page(nid, huge_page_order(h));
        if (page) {
                prep_compound_gigantic_page(page, huge_page_order(h));
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

static int alloc_fresh_gigantic_page(struct hstate *h,
                                nodemask_t *nodes_allowed)
{
        struct page *page = NULL;
        int nr_nodes, node;

        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                page = alloc_fresh_gigantic_page_node(h, node);
                if (page)
                        return 1;
        }

        return 0;
}

static inline bool gigantic_page_supported(void) { return true; }
#else
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned long order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
                                        nodemask_t *nodes_allowed) { return 0; }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
        int i;

        if (hstate_is_gigantic(h) && !gigantic_page_supported())
                return;

        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_private |
                                1 << PG_writeback);
        }
        VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        if (hstate_is_gigantic(h)) {
                destroy_compound_gigantic_page(page, huge_page_order(h));
                free_gigantic_page(page, huge_page_order(h));
        } else {
                arch_release_hugepage(page);
                __free_pages(page, huge_page_order(h));
        }
}

struct hstate *size_to_hstate(unsigned long size)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (huge_page_size(h) == size)
                        return h;
        }
        return NULL;
}

void free_huge_page(struct page *page)
{
        /*
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct hugepage_subpool *spool =
                (struct hugepage_subpool *)page_private(page);
        bool restore_reserve;

        set_page_private(page, 0);
        page->mapping = NULL;
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);

        spin_lock(&hugetlb_lock);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        if (restore_reserve)
                h->resv_huge_pages++;

        if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
                list_del(&page->lru);
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
                arch_clear_hugepage_flags(page);
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
        hugepage_subpool_put_pages(spool, 1);
}

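/*
 * Turn a freshly allocated compound page into a huge page: give it the
 * hugetlb destructor, account it to the hstate, and release it into
 * the huge page allocator via put_page().
 */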
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, free_huge_page);
        spin_lock(&hugetlb_lock);
        set_hugetlb_cgroup(page, NULL);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
        put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
        __ClearPageReserved(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                /*
                 * For gigantic hugepages allocated through bootmem at
                 * boot, it's safer to be consistent with the not-gigantic
                 * hugepages and clear the PG_reserved bit from all tail pages
                 * too.  Otherwise drivers using get_user_pages() to access
                 * tail pages may get the reference counting wrong if they see
                 * PG_reserved set on a tail page (despite the head page not
                 * having PG_reserved set).  Enforcing this consistency between
                 * head and tail pages allows drivers to optimize away a check
                 * on the head page when they need to know if put_page() is
                 * needed after get_user_pages().
                 */
                __ClearPageReserved(p);
                set_page_count(p, 0);
                p->first_page = page;
                /* Make sure p->first_page is always valid for PageTail() */
                smp_wmb();
                __SetPageTail(p);
        }
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
        if (!PageCompound(page))
                return 0;

        page = compound_head(page);
        return get_compound_page_dtor(page) == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head pages, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
        if (!PageHead(page_head))
                return 0;

        return get_compound_page_dtor(page_head) == free_huge_page;
}

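/*
 * Convert a (possibly tail) page of a huge page to its index in the
 * mapping, measured in base pages.
 */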
pgoff_t __basepage_index(struct page *page)
{
        struct page *page_head = compound_head(page);
        pgoff_t index = page_index(page_head);
        unsigned long compound_idx;

        if (!PageHuge(page_head))
                return page_index(page);

        if (compound_order(page_head) >= MAX_ORDER)
                compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
        else
                compound_idx = page - page_head;

        return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        page = alloc_pages_exact_node(nid,
                htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
                huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
                        __free_pages(page, huge_page_order(h));
                        return NULL;
                }
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
        struct page *page;
        int nr_nodes, node;
        int ret = 0;

        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                page = alloc_fresh_huge_page_node(h, node);
                if (page) {
                        ret = 1;
                        break;
                }
        }

        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
        else
                count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

        return ret;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
                                                         bool acct_surplus)
{
        int nr_nodes, node;
        int ret = 0;

        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
                /*
                 * If we're returning unused surplus pages, only examine
                 * nodes with surplus pages.
                 */
                if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
                    !list_empty(&h->hugepage_freelists[node])) {
                        struct page *page =
                                list_entry(h->hugepage_freelists[node].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[node]--;
                        if (acct_surplus) {
                                h->surplus_huge_pages--;
                                h->surplus_huge_pages_node[node]--;
                        }
                        update_and_free_page(h, page);
                        ret = 1;
                        break;
                }
        }

        return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages.
 */
static void dissolve_free_huge_page(struct page *page)
{
        spin_lock(&hugetlb_lock);
        if (PageHuge(page) && !page_count(page)) {
                struct hstate *h = page_hstate(page);
                int nid = page_to_nid(page);
                list_del(&page->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                update_and_free_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that start_pfn should be aligned to the (minimum) hugepage size.
 */
void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned int order = 8 * sizeof(void *);
        unsigned long pfn;
        struct hstate *h;

        if (!hugepages_supported())
                return;

        /* Set scan step to minimum hugepage size */
        for_each_hstate(h)
                if (order > huge_page_order(h))
                        order = huge_page_order(h);
        VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
                dissolve_free_huge_page(pfn_to_page(pfn));
}

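/*
 * Allocate a surplus huge page directly from the buddy allocator,
 * subject to nr_overcommit_huge_pages.  Returns NULL for gigantic
 * hstates or when the overcommit limit has already been reached.
 */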
static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
        struct page *page;
        unsigned int r_nid;

        if (hstate_is_gigantic(h))
                return NULL;

        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
         * overcommit.
         *
         * This however introduces a different race, where a process B
         * tries to grow the static hugepage pool while alloc_pages() is
         * called by process A. B will only examine the per-node
         * counters in determining if surplus huge pages can be
         * converted to normal huge pages in adjust_pool_surplus(). A
         * won't be able to increment the per-node counter, until the
         * lock is dropped by B, but B doesn't drop hugetlb_lock until
         * no more huge pages can be converted from surplus to normal
         * state (and doesn't try to convert again). Thus, we have a
         * case where a surplus huge page exists, the pool is grown, and
         * the surplus huge page still exists after, even though it
         * should just have been converted to a normal huge page. This
         * does not leak memory, though, as the hugepage will be freed
         * once it is out of use. It also does not allow the counters to
         * go out of whack in adjust_pool_surplus() as we don't modify
         * the node values until we've gotten the hugepage and only the
         * per-node value is checked there.
         */
        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        } else {
                h->nr_huge_pages++;
                h->surplus_huge_pages++;
        }
        spin_unlock(&hugetlb_lock);

        if (nid == NUMA_NO_NODE)
                page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
                                   __GFP_REPEAT|__GFP_NOWARN,
                                   huge_page_order(h));
        else
                page = alloc_pages_exact_node(nid,
                        htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
                        __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

        if (page && arch_prepare_hugepage(page)) {
                __free_pages(page, huge_page_order(h));
                page = NULL;
        }

        spin_lock(&hugetlb_lock);
        if (page) {
                INIT_LIST_HEAD(&page->lru);
                r_nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                set_hugetlb_cgroup(page, NULL);
                /*
                 * We incremented the global counters already
                 */
                h->nr_huge_pages_node[r_nid]++;
                h->surplus_huge_pages_node[r_nid]++;
                __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                h->nr_huge_pages--;
                h->surplus_huge_pages--;
                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);

        return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        struct page *page = NULL;

        spin_lock(&hugetlb_lock);
        if (h->free_huge_pages - h->resv_huge_pages > 0)
                page = dequeue_huge_page_node(h, nid);
        spin_unlock(&hugetlb_lock);

        if (!page)
                page = alloc_buddy_huge_page(h, nid);

        return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
        struct list_head surplus_list;
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;
        bool alloc_ok = true;

        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
                h->resv_huge_pages += delta;
                return 0;
        }

        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);

        ret = -ENOMEM;
retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
                        alloc_ok = false;
                        break;
                }
                list_add(&page->lru, &surplus_list);
        }
        allocated += i;

        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
        if (needed > 0) {
                if (alloc_ok)
                        goto retry;
                /*
                 * We were not able to allocate enough pages to
                 * satisfy the entire reservation so we free what
                 * we've allocated so far.
                 */
                goto free;
        }
        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.  Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
         * before they are reserved.
         */
        needed += allocated;
        h->resv_huge_pages += delta;
        ret = 0;

        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
                        break;
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
                VM_BUG_ON_PAGE(page_count(page), page);
                enqueue_huge_page(h, page);
        }
free:
        spin_unlock(&hugetlb_lock);

        /* Free unnecessary surplus pages to the buddy allocator */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru)
                put_page(page);
        spin_lock(&hugetlb_lock);

        return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
{
        unsigned long nr_pages;

        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;

        /* Cannot return gigantic pages currently */
        if (hstate_is_gigantic(h))
                return;

        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

        /*
         * We want to release as many surplus pages as possible, spread
         * evenly across all nodes with memory. Iterate across these nodes
         * until we can no longer free unreserved surplus pages. This occurs
         * when the nodes with surplus pages have no free pages.
         * free_pool_huge_page() will balance the freed pages across the
         * on-line nodes with memory and will handle the hstate accounting.
         */
        while (nr_pages--) {
                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
                        break;
                cond_resched_lock(&hugetlb_lock);
        }
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been allocated from the subpool and instantiated the change should
 * be committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct resv_map *resv;
        pgoff_t idx;
        long chg;

        resv = vma_resv_map(vma);
        if (!resv)
                return 1;

        idx = vma_hugecache_offset(h, vma, addr);
        chg = region_chg(resv, idx, idx + 1);

        if (vma->vm_flags & VM_MAYSHARE)
                return chg;
        else
                return chg < 0 ? chg : 0;
}

static void vma_commit_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct resv_map *resv;
        pgoff_t idx;

        resv = vma_resv_map(vma);
        if (!resv)
                return;

        idx = vma_hugecache_offset(h, vma, addr);
        region_add(resv, idx, idx + 1);
}

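/*
 * Allocate a huge page for a fault at 'addr' in 'vma': charge the
 * subpool and the hugetlb cgroup as needed, preferring a page from the
 * pool and falling back to a fresh surplus page from the buddy
 * allocator.  Returns an ERR_PTR on failure.
 */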
static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
{
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
        struct page *page;
        long chg;
        int ret, idx;
        struct hugetlb_cgroup *h_cg;

        idx = hstate_index(h);
        /*
         * Processes that did not create the mapping will have no
         * reserves and will not have accounted against the subpool
         * limit. Check that the subpool limit can be made before
         * satisfying the allocation.  MAP_NORESERVE mappings may also
         * need pages and the subpool limit allocated if no reserve
         * mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(-ENOMEM);
        if (chg || avoid_reserve)
                if (hugepage_subpool_get_pages(spool, 1))
                        return ERR_PTR(-ENOSPC);

        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
        if (ret)
                goto out_subpool_put;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
        if (!page) {
                spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page)
                        goto out_uncharge_cgroup;

                spin_lock(&hugetlb_lock);
                list_move(&page->lru, &h->hugepage_activelist);
                /* Fall through */
        }
        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
        spin_unlock(&hugetlb_lock);

        set_page_private(page, (unsigned long)spool);

        vma_commit_reservation(h, vma, addr);
        return page;

out_uncharge_cgroup:
        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
        if (chg || avoid_reserve)
                hugepage_subpool_put_pages(spool, 1);
        return ERR_PTR(-ENOSPC);
}

/*
 * alloc_huge_page()'s wrapper which simply returns the page if allocation
 * succeeds, otherwise NULL. This function is called from new_vma_page(),
 * where no ERR_VALUE is expected to be returned.
 */
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve)
{
        struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
        if (IS_ERR(page))
                page = NULL;
        return page;
}

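/*
 * Allocate one gigantic page from bootmem, trying each allowed node in
 * turn.  Returns 1 on success and 0 if no node could satisfy the
 * allocation.
 */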
1438 int __weak alloc_bootmem_huge_page(struct hstate *h)
1439 {
1440         struct huge_bootmem_page *m;
1441         int nr_nodes, node;
1442
1443         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1444                 void *addr;
1445
1446                 addr = memblock_virt_alloc_try_nid_nopanic(
1447                                 huge_page_size(h), huge_page_size(h),
1448                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1449                 if (addr) {
1450                         /*
1451                          * Use the beginning of the huge page to store the
1452                          * huge_bootmem_page struct (until gather_bootmem
1453                          * puts them into the mem_map).
1454                          */
1455                         m = addr;
1456                         goto found;
1457                 }
1458         }
1459         return 0;
1460
1461 found:
1462         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1463         /* Put them into a private list first because mem_map is not up yet */
1464         list_add(&m->list, &huge_boot_pages);
1465         m->hstate = h;
1466         return 1;
1467 }
1468
1469 static void __init prep_compound_huge_page(struct page *page, int order)
1470 {
1471         if (unlikely(order > (MAX_ORDER - 1)))
1472                 prep_compound_gigantic_page(page, order);
1473         else
1474                 prep_compound_page(page, order);
1475 }
1476
1477 /* Put bootmem huge pages into the standard lists after mem_map is up */
1478 static void __init gather_bootmem_prealloc(void)
1479 {
1480         struct huge_bootmem_page *m;
1481
1482         list_for_each_entry(m, &huge_boot_pages, list) {
1483                 struct hstate *h = m->hstate;
1484                 struct page *page;
1485
1486 #ifdef CONFIG_HIGHMEM
1487                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1488                 memblock_free_late(__pa(m),
1489                                    sizeof(struct huge_bootmem_page));
1490 #else
1491                 page = virt_to_page(m);
1492 #endif
1493                 WARN_ON(page_count(page) != 1);
1494                 prep_compound_huge_page(page, h->order);
1495                 WARN_ON(PageReserved(page));
1496                 prep_new_huge_page(h, page, page_to_nid(page));
1497                 /*
1498                  * If we had gigantic hugepages allocated at boot time, we need
1499                  * to restore the 'stolen' pages to totalram_pages in order to
1500                  * fix confusing memory reports from free(1) and another
1501                  * side-effects, like CommitLimit going negative.
1502                  */
1503                 if (hstate_is_gigantic(h))
1504                         adjust_managed_page_count(page, 1 << h->order);
1505         }
1506 }
1507
1508 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1509 {
1510         unsigned long i;
1511
1512         for (i = 0; i < h->max_huge_pages; ++i) {
1513                 if (hstate_is_gigantic(h)) {
1514                         if (!alloc_bootmem_huge_page(h))
1515                                 break;
1516                 } else if (!alloc_fresh_huge_page(h,
1517                                          &node_states[N_MEMORY]))
1518                         break;
1519         }
1520         h->max_huge_pages = i;
1521 }
1522
1523 static void __init hugetlb_init_hstates(void)
1524 {
1525         struct hstate *h;
1526
1527         for_each_hstate(h) {
1528                 /* oversized hugepages were initialized in early boot */
1529                 if (!hstate_is_gigantic(h))
1530                         hugetlb_hstate_alloc_pages(h);
1531         }
1532 }
1533
1534 static char * __init memfmt(char *buf, unsigned long n)
1535 {
1536         if (n >= (1UL << 30))
1537                 sprintf(buf, "%lu GB", n >> 30);
1538         else if (n >= (1UL << 20))
1539                 sprintf(buf, "%lu MB", n >> 20);
1540         else
1541                 sprintf(buf, "%lu KB", n >> 10);
1542         return buf;
1543 }
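
/*
 * Illustrative memfmt() results (example values, not from the original
 * source):
 *
 *	memfmt(buf, 2UL << 20)  -> "2 MB"	(PMD-sized page on x86)
 *	memfmt(buf, 1UL << 30)  -> "1 GB"	(PUD-sized page on x86)
 *	memfmt(buf, 64UL << 10) -> "64 KB"
 */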
1544
1545 static void __init report_hugepages(void)
1546 {
1547         struct hstate *h;
1548
1549         for_each_hstate(h) {
1550                 char buf[32];
1551                 pr_info("HugeTLB registered %s page size, pre-allocated %lu pages\n",
1552                         memfmt(buf, huge_page_size(h)),
1553                         h->free_huge_pages);
1554         }
1555 }
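
/*
 * A typical boot log line emitted above (illustrative values):
 *
 *	HugeTLB registered 2 MB page size, pre-allocated 512 pages
 */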
1556
1557 #ifdef CONFIG_HIGHMEM
1558 static void try_to_free_low(struct hstate *h, unsigned long count,
1559                                                 nodemask_t *nodes_allowed)
1560 {
1561         int i;
1562
1563         if (hstate_is_gigantic(h))
1564                 return;
1565
1566         for_each_node_mask(i, *nodes_allowed) {
1567                 struct page *page, *next;
1568                 struct list_head *freel = &h->hugepage_freelists[i];
1569                 list_for_each_entry_safe(page, next, freel, lru) {
1570                         if (count >= h->nr_huge_pages)
1571                                 return;
1572                         if (PageHighMem(page))
1573                                 continue;
1574                         list_del(&page->lru);
1575                         update_and_free_page(h, page);
1576                         h->free_huge_pages--;
1577                         h->free_huge_pages_node[page_to_nid(page)]--;
1578                 }
1579         }
1580 }
1581 #else
1582 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1583                                                 nodemask_t *nodes_allowed)
1584 {
1585 }
1586 #endif
1587
1588 /*
1589  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1590  * balanced by operating on them in a round-robin fashion.
1591  * Returns 1 if an adjustment was made.
1592  */
1593 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1594                                 int delta)
1595 {
1596         int nr_nodes, node;
1597
1598         VM_BUG_ON(delta != -1 && delta != 1);
1599
1600         if (delta < 0) {
1601                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1602                         if (h->surplus_huge_pages_node[node])
1603                                 goto found;
1604                 }
1605         } else {
1606                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1607                         if (h->surplus_huge_pages_node[node] <
1608                                         h->nr_huge_pages_node[node])
1609                                 goto found;
1610                 }
1611         }
1612         return 0;
1613
1614 found:
1615         h->surplus_huge_pages += delta;
1616         h->surplus_huge_pages_node[node] += delta;
1617         return 1;
1618 }
1619
1620 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1621 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1622                                                 nodemask_t *nodes_allowed)
1623 {
1624         unsigned long min_count, ret;
1625
1626         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1627                 return h->max_huge_pages;
1628
1629         /*
1630          * Increase the pool size
1631          * First take pages out of surplus state.  Then make up the
1632          * remaining difference by allocating fresh huge pages.
1633          *
1634          * We might race with alloc_buddy_huge_page() here and be unable
1635          * to convert a surplus huge page to a normal huge page. That is
1636          * not critical, though, it just means the overall size of the
1637          * pool might be one hugepage larger than it needs to be, but
1638          * within all the constraints specified by the sysctls.
1639          */
1640         spin_lock(&hugetlb_lock);
1641         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1642                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1643                         break;
1644         }
1645
1646         while (count > persistent_huge_pages(h)) {
1647                 /*
1648                  * If this allocation races such that we no longer need the
1649                  * page, free_huge_page will handle it by freeing the page
1650                  * and reducing the surplus.
1651                  */
1652                 spin_unlock(&hugetlb_lock);
1653                 if (hstate_is_gigantic(h))
1654                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1655                 else
1656                         ret = alloc_fresh_huge_page(h, nodes_allowed);
1657                 spin_lock(&hugetlb_lock);
1658                 if (!ret)
1659                         goto out;
1660
1661                 /* Bail out on pending signals, e.g. a Ctrl-C from the user */
1662                 if (signal_pending(current))
1663                         goto out;
1664         }
1665
1666         /*
1667          * Decrease the pool size
1668          * First return free pages to the buddy allocator (being careful
1669          * to keep enough around to satisfy reservations).  Then place
1670          * pages into surplus state as needed so the pool will shrink
1671          * to the desired size as pages become free.
1672          *
1673          * By placing pages into the surplus state independent of the
1674          * overcommit value, we are allowing the surplus pool size to
1675          * exceed overcommit. There are few sane options here. Since
1676          * alloc_buddy_huge_page() is checking the global counter,
1677          * though, we'll note that we're not allowed to exceed surplus
1678          * and won't grow the pool anywhere else, not until one of the
1679          * sysctls is changed or the surplus pages go out of use.
1680          */
1681         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1682         min_count = max(count, min_count);
1683         try_to_free_low(h, min_count, nodes_allowed);
1684         while (min_count < persistent_huge_pages(h)) {
1685                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1686                         break;
1687                 cond_resched_lock(&hugetlb_lock);
1688         }
1689         while (count < persistent_huge_pages(h)) {
1690                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1691                         break;
1692         }
1693 out:
1694         ret = persistent_huge_pages(h);
1695         spin_unlock(&hugetlb_lock);
1696         return ret;
1697 }
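
/*
 * Example (illustrative): both writes below reach set_max_huge_pages()
 * via the nr_hugepages handlers further down in this file:
 *
 *	# echo 128 > /proc/sys/vm/nr_hugepages	(grow pool to 128 pages)
 *	# echo 0   > /proc/sys/vm/nr_hugepages	(shrink it back)
 *
 * On shrink, pages backing outstanding reservations are not freed; they
 * are moved to surplus and released as they fall out of use.
 */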
1698
1699 #define HSTATE_ATTR_RO(_name) \
1700         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1701
1702 #define HSTATE_ATTR(_name) \
1703         static struct kobj_attribute _name##_attr = \
1704                 __ATTR(_name, 0644, _name##_show, _name##_store)
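
/*
 * For reference, HSTATE_ATTR(nr_hugepages) below expands to:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 *
 * i.e. a mode 0644 sysfs attribute wired to the _show/_store pair.
 */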
1705
1706 static struct kobject *hugepages_kobj;
1707 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1708
1709 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1710
1711 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1712 {
1713         int i;
1714
1715         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1716                 if (hstate_kobjs[i] == kobj) {
1717                         if (nidp)
1718                                 *nidp = NUMA_NO_NODE;
1719                         return &hstates[i];
1720                 }
1721
1722         return kobj_to_node_hstate(kobj, nidp);
1723 }
1724
1725 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1726                                         struct kobj_attribute *attr, char *buf)
1727 {
1728         struct hstate *h;
1729         unsigned long nr_huge_pages;
1730         int nid;
1731
1732         h = kobj_to_hstate(kobj, &nid);
1733         if (nid == NUMA_NO_NODE)
1734                 nr_huge_pages = h->nr_huge_pages;
1735         else
1736                 nr_huge_pages = h->nr_huge_pages_node[nid];
1737
1738         return sprintf(buf, "%lu\n", nr_huge_pages);
1739 }
1740
1741 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1742                                            struct hstate *h, int nid,
1743                                            unsigned long count, size_t len)
1744 {
1745         int err;
1746         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1747
1748         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1749                 err = -EINVAL;
1750                 goto out;
1751         }
1752
1753         if (nid == NUMA_NO_NODE) {
1754                 /*
1755                  * global hstate attribute
1756                  */
1757                 if (!(obey_mempolicy &&
1758                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1759                         NODEMASK_FREE(nodes_allowed);
1760                         nodes_allowed = &node_states[N_MEMORY];
1761                 }
1762         } else if (nodes_allowed) {
1763                 /*
1764                  * per node hstate attribute: adjust count to global,
1765                  * but restrict alloc/free to the specified node.
1766                  */
1767                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1768                 init_nodemask_of_node(nodes_allowed, nid);
1769         } else
1770                 nodes_allowed = &node_states[N_MEMORY];
1771
1772         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1773
1774         if (nodes_allowed != &node_states[N_MEMORY])
1775                 NODEMASK_FREE(nodes_allowed);
1776
1777         return len;
1778 out:
1779         NODEMASK_FREE(nodes_allowed);
1780         return err;
1781 }
1782
1783 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1784                                          struct kobject *kobj, const char *buf,
1785                                          size_t len)
1786 {
1787         struct hstate *h;
1788         unsigned long count;
1789         int nid;
1790         int err;
1791
1792         err = kstrtoul(buf, 10, &count);
1793         if (err)
1794                 return err;
1795
1796         h = kobj_to_hstate(kobj, &nid);
1797         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1798 }
1799
1800 static ssize_t nr_hugepages_show(struct kobject *kobj,
1801                                        struct kobj_attribute *attr, char *buf)
1802 {
1803         return nr_hugepages_show_common(kobj, attr, buf);
1804 }
1805
1806 static ssize_t nr_hugepages_store(struct kobject *kobj,
1807                struct kobj_attribute *attr, const char *buf, size_t len)
1808 {
1809         return nr_hugepages_store_common(false, kobj, buf, len);
1810 }
1811 HSTATE_ATTR(nr_hugepages);
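
/*
 * Example sysfs usage for the attribute above (path assumes a 2048 kB
 * hstate; illustrative):
 *
 *	# cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	# echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */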
1812
1813 #ifdef CONFIG_NUMA
1814
1815 /*
1816  * hstate attribute for optionally mempolicy-based constraint on persistent
1817  * huge page alloc/free.
1818  */
1819 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1820                                        struct kobj_attribute *attr, char *buf)
1821 {
1822         return nr_hugepages_show_common(kobj, attr, buf);
1823 }
1824
1825 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1826                struct kobj_attribute *attr, const char *buf, size_t len)
1827 {
1828         return nr_hugepages_store_common(true, kobj, buf, len);
1829 }
1830 HSTATE_ATTR(nr_hugepages_mempolicy);
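
/*
 * Example (illustrative): constrain the allocation to the task's
 * mempolicy nodes, e.g. with numactl(8):
 *
 *	# numactl -m 0 sh -c \
 *	  'echo 32 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 */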
1831 #endif
1832
1833
1834 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1835                                         struct kobj_attribute *attr, char *buf)
1836 {
1837         struct hstate *h = kobj_to_hstate(kobj, NULL);
1838         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1839 }
1840
1841 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1842                 struct kobj_attribute *attr, const char *buf, size_t count)
1843 {
1844         int err;
1845         unsigned long input;
1846         struct hstate *h = kobj_to_hstate(kobj, NULL);
1847
1848         if (hstate_is_gigantic(h))
1849                 return -EINVAL;
1850
1851         err = kstrtoul(buf, 10, &input);
1852         if (err)
1853                 return err;
1854
1855         spin_lock(&hugetlb_lock);
1856         h->nr_overcommit_huge_pages = input;
1857         spin_unlock(&hugetlb_lock);
1858
1859         return count;
1860 }
1861 HSTATE_ATTR(nr_overcommit_hugepages);
1862
1863 static ssize_t free_hugepages_show(struct kobject *kobj,
1864                                         struct kobj_attribute *attr, char *buf)
1865 {
1866         struct hstate *h;
1867         unsigned long free_huge_pages;
1868         int nid;
1869
1870         h = kobj_to_hstate(kobj, &nid);
1871         if (nid == NUMA_NO_NODE)
1872                 free_huge_pages = h->free_huge_pages;
1873         else
1874                 free_huge_pages = h->free_huge_pages_node[nid];
1875
1876         return sprintf(buf, "%lu\n", free_huge_pages);
1877 }
1878 HSTATE_ATTR_RO(free_hugepages);
1879
1880 static ssize_t resv_hugepages_show(struct kobject *kobj,
1881                                         struct kobj_attribute *attr, char *buf)
1882 {
1883         struct hstate *h = kobj_to_hstate(kobj, NULL);
1884         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1885 }
1886 HSTATE_ATTR_RO(resv_hugepages);
1887
1888 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1889                                         struct kobj_attribute *attr, char *buf)
1890 {
1891         struct hstate *h;
1892         unsigned long surplus_huge_pages;
1893         int nid;
1894
1895         h = kobj_to_hstate(kobj, &nid);
1896         if (nid == NUMA_NO_NODE)
1897                 surplus_huge_pages = h->surplus_huge_pages;
1898         else
1899                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1900
1901         return sprintf(buf, "%lu\n", surplus_huge_pages);
1902 }
1903 HSTATE_ATTR_RO(surplus_hugepages);
1904
1905 static struct attribute *hstate_attrs[] = {
1906         &nr_hugepages_attr.attr,
1907         &nr_overcommit_hugepages_attr.attr,
1908         &free_hugepages_attr.attr,
1909         &resv_hugepages_attr.attr,
1910         &surplus_hugepages_attr.attr,
1911 #ifdef CONFIG_NUMA
1912         &nr_hugepages_mempolicy_attr.attr,
1913 #endif
1914         NULL,
1915 };
1916
1917 static struct attribute_group hstate_attr_group = {
1918         .attrs = hstate_attrs,
1919 };
1920
1921 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1922                                     struct kobject **hstate_kobjs,
1923                                     struct attribute_group *hstate_attr_group)
1924 {
1925         int retval;
1926         int hi = hstate_index(h);
1927
1928         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1929         if (!hstate_kobjs[hi])
1930                 return -ENOMEM;
1931
1932         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1933         if (retval)
1934                 kobject_put(hstate_kobjs[hi]);
1935
1936         return retval;
1937 }
1938
1939 static void __init hugetlb_sysfs_init(void)
1940 {
1941         struct hstate *h;
1942         int err;
1943
1944         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1945         if (!hugepages_kobj)
1946                 return;
1947
1948         for_each_hstate(h) {
1949                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1950                                          hstate_kobjs, &hstate_attr_group);
1951                 if (err)
1952                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
1953         }
1954 }
1955
1956 #ifdef CONFIG_NUMA
1957
1958 /*
1959  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1960  * with node devices in node_devices[] using a parallel array.  The array
1961  * index of a node device or _hstate == node id.
1962  * This is here to avoid any static dependency of the node device driver, in
1963  * the base kernel, on the hugetlb module.
1964  */
1965 struct node_hstate {
1966         struct kobject          *hugepages_kobj;
1967         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1968 };
1969 struct node_hstate node_hstates[MAX_NUMNODES];
1970
1971 /*
1972  * A subset of global hstate attributes for node devices
1973  */
1974 static struct attribute *per_node_hstate_attrs[] = {
1975         &nr_hugepages_attr.attr,
1976         &free_hugepages_attr.attr,
1977         &surplus_hugepages_attr.attr,
1978         NULL,
1979 };
1980
1981 static struct attribute_group per_node_hstate_attr_group = {
1982         .attrs = per_node_hstate_attrs,
1983 };
1984
1985 /*
1986  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1987  * Returns node id via non-NULL nidp.
1988  */
1989 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1990 {
1991         int nid;
1992
1993         for (nid = 0; nid < nr_node_ids; nid++) {
1994                 struct node_hstate *nhs = &node_hstates[nid];
1995                 int i;
1996                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1997                         if (nhs->hstate_kobjs[i] == kobj) {
1998                                 if (nidp)
1999                                         *nidp = nid;
2000                                 return &hstates[i];
2001                         }
2002         }
2003
2004         BUG();
2005         return NULL;
2006 }
2007
2008 /*
2009  * Unregister hstate attributes from a single node device.
2010  * No-op if no hstate attributes attached.
2011  */
2012 static void hugetlb_unregister_node(struct node *node)
2013 {
2014         struct hstate *h;
2015         struct node_hstate *nhs = &node_hstates[node->dev.id];
2016
2017         if (!nhs->hugepages_kobj)
2018                 return;         /* no hstate attributes */
2019
2020         for_each_hstate(h) {
2021                 int idx = hstate_index(h);
2022                 if (nhs->hstate_kobjs[idx]) {
2023                         kobject_put(nhs->hstate_kobjs[idx]);
2024                         nhs->hstate_kobjs[idx] = NULL;
2025                 }
2026         }
2027
2028         kobject_put(nhs->hugepages_kobj);
2029         nhs->hugepages_kobj = NULL;
2030 }
2031
2032 /*
2033  * hugetlb module exit:  unregister hstate attributes from node devices
2034  * that have them.
2035  */
2036 static void hugetlb_unregister_all_nodes(void)
2037 {
2038         int nid;
2039
2040         /*
2041          * disable node device registrations.
2042          */
2043         register_hugetlbfs_with_node(NULL, NULL);
2044
2045         /*
2046          * remove hstate attributes from any nodes that have them.
2047          */
2048         for (nid = 0; nid < nr_node_ids; nid++)
2049                 hugetlb_unregister_node(node_devices[nid]);
2050 }
2051
2052 /*
2053  * Register hstate attributes for a single node device.
2054  * No-op if attributes already registered.
2055  */
2056 static void hugetlb_register_node(struct node *node)
2057 {
2058         struct hstate *h;
2059         struct node_hstate *nhs = &node_hstates[node->dev.id];
2060         int err;
2061
2062         if (nhs->hugepages_kobj)
2063                 return;         /* already allocated */
2064
2065         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2066                                                         &node->dev.kobj);
2067         if (!nhs->hugepages_kobj)
2068                 return;
2069
2070         for_each_hstate(h) {
2071                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2072                                                 nhs->hstate_kobjs,
2073                                                 &per_node_hstate_attr_group);
2074                 if (err) {
2075                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2076                                 h->name, node->dev.id);
2077                         hugetlb_unregister_node(node);
2078                         break;
2079                 }
2080         }
2081 }
2082
2083 /*
2084  * hugetlb init time:  register hstate attributes for all registered node
2085  * devices of nodes that have memory.  All on-line nodes should have
2086  * registered their associated device by this time.
2087  */
2088 static void __init hugetlb_register_all_nodes(void)
2089 {
2090         int nid;
2091
2092         for_each_node_state(nid, N_MEMORY) {
2093                 struct node *node = node_devices[nid];
2094                 if (node->dev.id == nid)
2095                         hugetlb_register_node(node);
2096         }
2097
2098         /*
2099          * Let the node device driver know we're here so it can
2100          * [un]register hstate attributes on node hotplug.
2101          */
2102         register_hugetlbfs_with_node(hugetlb_register_node,
2103                                      hugetlb_unregister_node);
2104 }
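
/*
 * The per node attributes registered above appear at, e.g. (node 0 and
 * a 2048 kB hstate; illustrative path):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */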
2105 #else   /* !CONFIG_NUMA */
2106
2107 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2108 {
2109         BUG();
2110         if (nidp)
2111                 *nidp = -1;
2112         return NULL;
2113 }
2114
2115 static void hugetlb_unregister_all_nodes(void) { }
2116
2117 static void hugetlb_register_all_nodes(void) { }
2118
2119 #endif
2120
2121 static void __exit hugetlb_exit(void)
2122 {
2123         struct hstate *h;
2124
2125         hugetlb_unregister_all_nodes();
2126
2127         for_each_hstate(h) {
2128                 kobject_put(hstate_kobjs[hstate_index(h)]);
2129         }
2130
2131         kobject_put(hugepages_kobj);
2132         kfree(htlb_fault_mutex_table);
2133 }
2134 module_exit(hugetlb_exit);
2135
2136 static int __init hugetlb_init(void)
2137 {
2138         int i;
2139
2140         if (!hugepages_supported())
2141                 return 0;
2142
2143         if (!size_to_hstate(default_hstate_size)) {
2144                 default_hstate_size = HPAGE_SIZE;
2145                 if (!size_to_hstate(default_hstate_size))
2146                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2147         }
2148         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2149         if (default_hstate_max_huge_pages)
2150                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2151
2152         hugetlb_init_hstates();
2153         gather_bootmem_prealloc();
2154         report_hugepages();
2155
2156         hugetlb_sysfs_init();
2157         hugetlb_register_all_nodes();
2158         hugetlb_cgroup_file_init();
2159
2160 #ifdef CONFIG_SMP
2161         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2162 #else
2163         num_fault_mutexes = 1;
2164 #endif
2165         htlb_fault_mutex_table =
2166                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2167         BUG_ON(!htlb_fault_mutex_table);
2168
2169         for (i = 0; i < num_fault_mutexes; i++)
2170                 mutex_init(&htlb_fault_mutex_table[i]);
2171         return 0;
2172 }
2173 module_init(hugetlb_init);
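
/*
 * Sketch of the fault mutex sizing in hugetlb_init() above: with 16
 * possible CPUs (illustrative), num_fault_mutexes becomes
 * roundup_pow_of_two(8 * 16) = 128, so concurrent faults spread across
 * 128 mutexes while faults on the same logical page hash to the same one.
 */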
2174
2175 /* Should be called on processing a hugepagesz=... option */
2176 void __init hugetlb_add_hstate(unsigned order)
2177 {
2178         struct hstate *h;
2179         unsigned long i;
2180
2181         if (size_to_hstate(PAGE_SIZE << order)) {
2182                 pr_warning("hugepagesz= specified twice, ignoring\n");
2183                 return;
2184         }
2185         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2186         BUG_ON(order == 0);
2187         h = &hstates[hugetlb_max_hstate++];
2188         h->order = order;
2189         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2190         h->nr_huge_pages = 0;
2191         h->free_huge_pages = 0;
2192         for (i = 0; i < MAX_NUMNODES; ++i)
2193                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2194         INIT_LIST_HEAD(&h->hugepage_activelist);
2195         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2196         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2197         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2198                                         huge_page_size(h)/1024);
2199
2200         parsed_hstate = h;
2201 }
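
/*
 * Example caller (architecture code; illustrative): x86 registers 1 GB
 * pages with
 *
 *	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 *
 * giving order 18 with 4 kB base pages and the sysfs/boot name
 * "hugepages-1048576kB".
 */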
2202
2203 static int __init hugetlb_nrpages_setup(char *s)
2204 {
2205         unsigned long *mhp;
2206         static unsigned long *last_mhp;
2207
2208         /*
2209          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2210          * so this hugepages= parameter goes to the "default hstate".
2211          */
2212         if (!hugetlb_max_hstate)
2213                 mhp = &default_hstate_max_huge_pages;
2214         else
2215                 mhp = &parsed_hstate->max_huge_pages;
2216
2217         if (mhp == last_mhp) {
2218                 pr_warning("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2220                 return 1;
2221         }
2222
2223         if (sscanf(s, "%lu", mhp) <= 0)
2224                 *mhp = 0;
2225
2226         /*
2227          * Global state is always initialized later in hugetlb_init.
2228          * But gigantic (>= MAX_ORDER) hstates must have their pages allocated
2229          * here, early, while the bootmem allocator is still available.
2230          */
2231         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2232                 hugetlb_hstate_alloc_pages(parsed_hstate);
2233
2234         last_mhp = mhp;
2235
2236         return 1;
2237 }
2238 __setup("hugepages=", hugetlb_nrpages_setup);
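
/*
 * Example boot command lines (illustrative):
 *
 *	hugepages=512
 *		512 default-sized huge pages.
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=256
 *		four gigantic pages grabbed from bootmem right here, plus
 *		256 2 MB pages set up later in hugetlb_init().
 */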
2239
2240 static int __init hugetlb_default_setup(char *s)
2241 {
2242         default_hstate_size = memparse(s, &s);
2243         return 1;
2244 }
2245 __setup("default_hugepagesz=", hugetlb_default_setup);
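
/*
 * Example: "default_hugepagesz=1G" makes the 1 GB hstate the default, so
 * a bare hugepages= count and /proc/sys/vm/nr_hugepages apply to it
 * (illustrative; if the size matches no registered hstate, hugetlb_init()
 * above falls back to HPAGE_SIZE).
 */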
2246
2247 static unsigned int cpuset_mems_nr(unsigned int *array)
2248 {
2249         int node;
2250         unsigned int nr = 0;
2251
2252         for_each_node_mask(node, cpuset_current_mems_allowed)
2253                 nr += array[node];
2254
2255         return nr;
2256 }
2257
2258 #ifdef CONFIG_SYSCTL
2259 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2260                          struct ctl_table *table, int write,
2261                          void __user *buffer, size_t *length, loff_t *ppos)
2262 {
2263         struct hstate *h = &default_hstate;
2264         unsigned long tmp = h->max_huge_pages;
2265         int ret;
2266
2267         if (!hugepages_supported())
2268                 return -ENOTSUPP;
2269
2270         table->data = &tmp;
2271         table->maxlen = sizeof(unsigned long);
2272         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2273         if (ret)
2274                 goto out;
2275
2276         if (write)
2277                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2278                                                   NUMA_NO_NODE, tmp, *length);
2279 out:
2280         return ret;
2281 }
2282
2283 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2284                           void __user *buffer, size_t *length, loff_t *ppos)
2285 {
2286
2287         return hugetlb_sysctl_handler_common(false, table, write,
2288                                                         buffer, length, ppos);
2289 }
2290
2291 #ifdef CONFIG_NUMA
2292 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2293                           void __user *buffer, size_t *length, loff_t *ppos)
2294 {
2295         return hugetlb_sysctl_handler_common(true, table, write,
2296                                                         buffer, length, ppos);
2297 }
2298 #endif /* CONFIG_NUMA */
2299
2300 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2301                         void __user *buffer,
2302                         size_t *length, loff_t *ppos)
2303 {
2304         struct hstate *h = &default_hstate;
2305         unsigned long tmp;
2306         int ret;
2307
2308         if (!hugepages_supported())
2309                 return -ENOTSUPP;
2310
2311         tmp = h->nr_overcommit_huge_pages;
2312
2313         if (write && hstate_is_gigantic(h))
2314                 return -EINVAL;
2315
2316         table->data = &tmp;
2317         table->maxlen = sizeof(unsigned long);
2318         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2319         if (ret)
2320                 goto out;
2321
2322         if (write) {
2323                 spin_lock(&hugetlb_lock);
2324                 h->nr_overcommit_huge_pages = tmp;
2325                 spin_unlock(&hugetlb_lock);
2326         }
2327 out:
2328         return ret;
2329 }
2330
2331 #endif /* CONFIG_SYSCTL */
2332
2333 void hugetlb_report_meminfo(struct seq_file *m)
2334 {
2335         struct hstate *h = &default_hstate;
2336         if (!hugepages_supported())
2337                 return;
2338         seq_printf(m,
2339                         "HugePages_Total:   %5lu\n"
2340                         "HugePages_Free:    %5lu\n"
2341                         "HugePages_Rsvd:    %5lu\n"
2342                         "HugePages_Surp:    %5lu\n"
2343                         "Hugepagesize:   %8lu kB\n",
2344                         h->nr_huge_pages,
2345                         h->free_huge_pages,
2346                         h->resv_huge_pages,
2347                         h->surplus_huge_pages,
2348                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2349 }
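
/*
 * Sample /proc/meminfo fragment produced above (illustrative values):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      512
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */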
2350
2351 int hugetlb_report_node_meminfo(int nid, char *buf)
2352 {
2353         struct hstate *h = &default_hstate;
2354         if (!hugepages_supported())
2355                 return 0;
2356         return sprintf(buf,
2357                 "Node %d HugePages_Total: %5u\n"
2358                 "Node %d HugePages_Free:  %5u\n"
2359                 "Node %d HugePages_Surp:  %5u\n",
2360                 nid, h->nr_huge_pages_node[nid],
2361                 nid, h->free_huge_pages_node[nid],
2362                 nid, h->surplus_huge_pages_node[nid]);
2363 }
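
/*
 * Sample output as seen in /sys/devices/system/node/node0/meminfo
 * (illustrative values):
 *
 *	Node 0 HugePages_Total:   256
 *	Node 0 HugePages_Free:    256
 *	Node 0 HugePages_Surp:      0
 */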
2364
2365 void hugetlb_show_meminfo(void)
2366 {
2367         struct hstate *h;
2368         int nid;
2369
2370         if (!hugepages_supported())
2371                 return;
2372
2373         for_each_node_state(nid, N_MEMORY)
2374                 for_each_hstate(h)
2375                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2376                                 nid,
2377                                 h->nr_huge_pages_node[nid],
2378                                 h->free_huge_pages_node[nid],
2379                                 h->surplus_huge_pages_node[nid],
2380                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2381 }
2382
2383 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2384 unsigned long hugetlb_total_pages(void)
2385 {
2386         struct hstate *h;
2387         unsigned long nr_total_pages = 0;
2388
2389         for_each_hstate(h)
2390                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2391         return nr_total_pages;
2392 }
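
/*
 * Example (illustrative): 512 x 2 MB huge pages with 4 kB base pages
 * yield 512 * 512 = 262144 PAGE_SIZE units, i.e. 1 GB.
 */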
2393
2394 static int hugetlb_acct_memory(struct hstate *h, long delta)
2395 {
2396         int ret = -ENOMEM;
2397
2398         spin_lock(&hugetlb_lock);
2399         /*
2400          * When cpuset is configured, it breaks the strict hugetlb page
2401          * reservation as the accounting is done on a global variable. Such
2402          * reservation is completely rubbish in the presence of cpuset because
2403          * the reservation is not checked against page availability for the
2404          * current cpuset. Application can still potentially OOM'ed by kernel
2405          * current cpuset. An application can still be OOM-killed by the
2406          * kernel for lack of free hugetlb pages in the cpuset the task
2407          * runs in. Attempting to enforce strict accounting with cpusets
2408          * is almost impossible (or too ugly) because cpusets are so fluid
2409          * that tasks and memory nodes can move between them dynamically.
2410          * The change of semantics for shared hugetlb mapping with cpuset is
2411          * undesirable. However, in order to preserve some of the semantics,
2412          * we fall back to check against current free page availability as
2413          * a best attempt and hopefully to minimize the impact of changing
2414          * semantics that cpuset has.
2415          */
2416         if (delta > 0) {
2417                 if (gather_surplus_pages(h, delta) < 0)
2418                         goto out;
2419
2420                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2421                         return_unused_surplus_pages(h, delta);
2422                         goto out;
2423                 }
2424         }
2425
2426         ret = 0;
2427         if (delta < 0)
2428                 return_unused_surplus_pages(h, (unsigned long) -delta);
2429
2430 out:
2431         spin_unlock(&hugetlb_lock);
2432         return ret;
2433 }
2434
2435 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2436 {
2437         struct resv_map *resv = vma_resv_map(vma);
2438
2439         /*
2440          * This new VMA should share its sibling's reservation map if present.
2441          * The VMA will only ever have a valid reservation map pointer where
2442          * it is being copied for another still existing VMA.  As that VMA
2443          * has a reference to the reservation map it cannot disappear until
2444          * after this open call completes.  It is therefore safe to take a
2445          * new reference here without additional locking.
2446          */
2447         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2448                 kref_get(&resv->refs);
2449 }
2450
2451 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2452 {
2453         struct hstate *h = hstate_vma(vma);
2454         struct resv_map *resv = vma_resv_map(vma);
2455         struct hugepage_subpool *spool = subpool_vma(vma);
2456         unsigned long reserve, start, end;
2457
2458         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2459                 return;
2460
2461         start = vma_hugecache_offset(h, vma, vma->vm_start);
2462         end = vma_hugecache_offset(h, vma, vma->vm_end);
2463
2464         reserve = (end - start) - region_count(resv, start, end);
2465
2466         kref_put(&resv->refs, resv_map_release);
2467
2468         if (reserve) {
2469                 hugetlb_acct_memory(h, -reserve);
2470                 hugepage_subpool_put_pages(spool, reserve);
2471         }
2472 }
2473
2474 /*
2475  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2476  * handle_mm_fault() to try to instantiate regular-sized pages in the
2477  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2478  * this far.
2479  */
2480 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2481 {
2482         BUG();
2483         return 0;
2484 }
2485
2486 const struct vm_operations_struct hugetlb_vm_ops = {
2487         .fault = hugetlb_vm_op_fault,
2488         .open = hugetlb_vm_op_open,
2489         .close = hugetlb_vm_op_close,
2490 };
2491
2492 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2493                                 int writable)
2494 {
2495         pte_t entry;
2496
2497         if (writable) {
2498                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2499                                          vma->vm_page_prot)));
2500         } else {
2501                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2502                                            vma->vm_page_prot));
2503         }
2504         entry = pte_mkyoung(entry);
2505         entry = pte_mkhuge(entry);
2506         entry = arch_make_huge_pte(entry, vma, page, writable);
2507
2508         return entry;
2509 }
2510
2511 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2512                                    unsigned long address, pte_t *ptep)
2513 {
2514         pte_t entry;
2515
2516         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2517         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2518                 update_mmu_cache(vma, address, ptep);
2519 }
2520
2521 static int is_hugetlb_entry_migration(pte_t pte)
2522 {
2523         swp_entry_t swp;
2524
2525         if (huge_pte_none(pte) || pte_present(pte))
2526                 return 0;
2527         swp = pte_to_swp_entry(pte);
2528         if (non_swap_entry(swp) && is_migration_entry(swp))
2529                 return 1;
2530         else
2531                 return 0;
2532 }
2533
2534 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2535 {
2536         swp_entry_t swp;
2537
2538         if (huge_pte_none(pte) || pte_present(pte))
2539                 return 0;
2540         swp = pte_to_swp_entry(pte);
2541         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2542                 return 1;
2543         else
2544                 return 0;
2545 }
2546
2547 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2548                             struct vm_area_struct *vma)
2549 {
2550         pte_t *src_pte, *dst_pte, entry;
2551         struct page *ptepage;
2552         unsigned long addr;
2553         int cow;
2554         struct hstate *h = hstate_vma(vma);
2555         unsigned long sz = huge_page_size(h);
2556         unsigned long mmun_start;       /* For mmu_notifiers */
2557         unsigned long mmun_end;         /* For mmu_notifiers */
2558         int ret = 0;
2559
2560         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2561
2562         mmun_start = vma->vm_start;
2563         mmun_end = vma->vm_end;
2564         if (cow)
2565                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2566
2567         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2568                 spinlock_t *src_ptl, *dst_ptl;
2569                 src_pte = huge_pte_offset(src, addr);
2570                 if (!src_pte)
2571                         continue;
2572                 dst_pte = huge_pte_alloc(dst, addr, sz);
2573                 if (!dst_pte) {
2574                         ret = -ENOMEM;
2575                         break;
2576                 }
2577
2578                 /* If the pagetables are shared don't copy or take references */
2579                 if (dst_pte == src_pte)
2580                         continue;
2581
2582                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2583                 src_ptl = huge_pte_lockptr(h, src, src_pte);
2584                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2585                 entry = huge_ptep_get(src_pte);
2586                 if (huge_pte_none(entry)) { /* skip none entry */
2587                         ;
2588                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2589                                     is_hugetlb_entry_hwpoisoned(entry))) {
2590                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
2591
2592                         if (is_write_migration_entry(swp_entry) && cow) {
2593                                 /*
2594                                  * COW mappings require pages in both
2595                                  * parent and child to be set to read.
2596                                  */
2597                                 make_migration_entry_read(&swp_entry);
2598                                 entry = swp_entry_to_pte(swp_entry);
2599                                 set_huge_pte_at(src, addr, src_pte, entry);
2600                         }
2601                         set_huge_pte_at(dst, addr, dst_pte, entry);
2602                 } else {
2603                         if (cow) {
2604                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2605                                 mmu_notifier_invalidate_range(src, mmun_start,
2606                                                                    mmun_end);
2607                         }
2608                         entry = huge_ptep_get(src_pte);
2609                         ptepage = pte_page(entry);
2610                         get_page(ptepage);
2611                         page_dup_rmap(ptepage);
2612                         set_huge_pte_at(dst, addr, dst_pte, entry);
2613                 }
2614                 spin_unlock(src_ptl);
2615                 spin_unlock(dst_ptl);
2616         }
2617
2618         if (cow)
2619                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2620
2621         return ret;
2622 }
2623
2624 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2625                             unsigned long start, unsigned long end,
2626                             struct page *ref_page)
2627 {
2628         int force_flush = 0;
2629         struct mm_struct *mm = vma->vm_mm;
2630         unsigned long address;
2631         pte_t *ptep;
2632         pte_t pte;
2633         spinlock_t *ptl;
2634         struct page *page;
2635         struct hstate *h = hstate_vma(vma);
2636         unsigned long sz = huge_page_size(h);
2637         const unsigned long mmun_start = start; /* For mmu_notifiers */
2638         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2639
2640         WARN_ON(!is_vm_hugetlb_page(vma));
2641         BUG_ON(start & ~huge_page_mask(h));
2642         BUG_ON(end & ~huge_page_mask(h));
2643
2644         tlb_start_vma(tlb, vma);
2645         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2646         address = start;
2647 again:
2648         for (; address < end; address += sz) {
2649                 ptep = huge_pte_offset(mm, address);
2650                 if (!ptep)
2651                         continue;
2652
2653                 ptl = huge_pte_lock(h, mm, ptep);
2654                 if (huge_pmd_unshare(mm, &address, ptep))
2655                         goto unlock;
2656
2657                 pte = huge_ptep_get(ptep);
2658                 if (huge_pte_none(pte))
2659                         goto unlock;
2660
2661                 /*
2662                  * A migrating or HWPoisoned hugepage is already unmapped
2663                  * and its refcount has been dropped, so just clear the pte here.
2664                  */
2665                 if (unlikely(!pte_present(pte))) {
2666                         huge_pte_clear(mm, address, ptep);
2667                         goto unlock;
2668                 }
2669
2670                 page = pte_page(pte);
2671                 /*
2672                  * If a reference page is supplied, it is because a specific
2673                  * page is being unmapped, not a range. Ensure the page we
2674                  * are about to unmap is the actual page of interest.
2675                  */
2676                 if (ref_page) {
2677                         if (page != ref_page)
2678                                 goto unlock;
2679
2680                         /*
2681                          * Mark the VMA as having unmapped its page so that
2682                          * future faults in this VMA will fail rather than
2683                          * look like data was lost.
2684                          */
2685                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2686                 }
2687
2688                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2689                 tlb_remove_tlb_entry(tlb, ptep, address);
2690                 if (huge_pte_dirty(pte))
2691                         set_page_dirty(page);
2692
2693                 page_remove_rmap(page);
2694                 force_flush = !__tlb_remove_page(tlb, page);
2695                 if (force_flush) {
2696                         address += sz;
2697                         spin_unlock(ptl);
2698                         break;
2699                 }
2700                 /* Bail out after unmapping reference page if supplied */
2701                 if (ref_page) {
2702                         spin_unlock(ptl);
2703                         break;
2704                 }
2705 unlock:
2706                 spin_unlock(ptl);
2707         }
2708         /*
2709          * mmu_gather ran out of room to batch pages, so we broke out of
2710          * the PTE lock to avoid doing the potentially expensive TLB
2711          * invalidate and page-free while holding it.
2712          */
2713         if (force_flush) {
2714                 force_flush = 0;
2715                 tlb_flush_mmu(tlb);
2716                 if (address < end && !ref_page)
2717                         goto again;
2718         }
2719         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2720         tlb_end_vma(tlb, vma);
2721 }
2722
2723 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2724                           struct vm_area_struct *vma, unsigned long start,
2725                           unsigned long end, struct page *ref_page)
2726 {
2727         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2728
2729         /*
2730          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2731          * test will fail on a vma being torn down, and not grab a page table
2732          * on its way out.  We're lucky that the flag has such an appropriate
2733          * name, and can in fact be safely cleared here. We could clear it
2734          * before the __unmap_hugepage_range above, but all that's necessary
2735          * is to clear it before releasing the i_mmap_rwsem. This works
2736          * because in the context this is called, the VMA is about to be
2737          * destroyed and the i_mmap_rwsem is held.
2738          */
2739         vma->vm_flags &= ~VM_MAYSHARE;
2740 }
2741
2742 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2743                           unsigned long end, struct page *ref_page)
2744 {
2745         struct mm_struct *mm;
2746         struct mmu_gather tlb;
2747
2748         mm = vma->vm_mm;
2749
2750         tlb_gather_mmu(&tlb, mm, start, end);
2751         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2752         tlb_finish_mmu(&tlb, start, end);
2753 }
2754
2755 /*
2756  * This is called when the original mapper fails to COW a MAP_PRIVATE
2757  * mapping it owns the reserve page for. The intention is to unmap the page
2758  * from other VMAs and let the children be SIGKILLed if they are faulting the
2759  * same region.
2760  */
2761 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2762                               struct page *page, unsigned long address)
2763 {
2764         struct hstate *h = hstate_vma(vma);
2765         struct vm_area_struct *iter_vma;
2766         struct address_space *mapping;
2767         pgoff_t pgoff;
2768
2769         /*
2770          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2771          * from page cache lookup which is in HPAGE_SIZE units.
2772          */
2773         address = address & huge_page_mask(h);
2774         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2775                         vma->vm_pgoff;
2776         mapping = file_inode(vma->vm_file)->i_mapping;
2777
2778         /*
2779          * Take the mapping lock for the duration of the table walk. Since
2780          * this mapping should be shared between all the VMAs,
2781          * __unmap_hugepage_range() is called with the lock already held.
2782          */
2783         i_mmap_lock_write(mapping);
2784         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2785                 /* Do not unmap the current VMA */
2786                 if (iter_vma == vma)
2787                         continue;
2788
2789                 /*
2790                  * Unmap the page from other VMAs without their own reserves.
2791                  * They get marked to be SIGKILLed if they fault in these
2792                  * areas. This is because a future no-page fault on this VMA
2793                  * could insert a zeroed page instead of the data existing
2794                  * from the time of fork. This would look like data corruption
2795                  * from the time of fork. This would look like data corruption.
2796                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2797                         unmap_hugepage_range(iter_vma, address,
2798                                              address + huge_page_size(h), page);
2799         }
2800         i_mmap_unlock_write(mapping);
2801 }
2802
2803 /*
2804  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2805  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2806  * cannot race with other handlers or page migration.
2807  * Keep the pte_same checks anyway to make transition from the mutex easier.
2808  */
2809 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2810                         unsigned long address, pte_t *ptep, pte_t pte,
2811                         struct page *pagecache_page, spinlock_t *ptl)
2812 {
2813         struct hstate *h = hstate_vma(vma);
2814         struct page *old_page, *new_page;
2815         int ret = 0, outside_reserve = 0;
2816         unsigned long mmun_start;       /* For mmu_notifiers */
2817         unsigned long mmun_end;         /* For mmu_notifiers */
2818
2819         old_page = pte_page(pte);
2820
2821 retry_avoidcopy:
2822         /* If no-one else is actually using this page, avoid the copy
2823          * and just make the page writable */
2824         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2825                 page_move_anon_rmap(old_page, vma, address);
2826                 set_huge_ptep_writable(vma, address, ptep);
2827                 return 0;
2828         }
2829
2830         /*
2831          * If the process that created a MAP_PRIVATE mapping is about to
2832          * perform a COW due to a shared page count, attempt to satisfy
2833          * the allocation without using the existing reserves. The pagecache
2834          * page is used to determine if the reserve at this address was
2835          * consumed or not. If reserves were used, a partial faulted mapping
2836          * at the time of fork() could consume its reserves on COW instead
2837          * of the full address range.
2838          */
2839         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2840                         old_page != pagecache_page)
2841                 outside_reserve = 1;
2842
2843         page_cache_get(old_page);
2844
2845         /*
2846          * Drop page table lock as buddy allocator may be called. It will
2847          * be acquired again before returning to the caller, as expected.
2848          */
2849         spin_unlock(ptl);
2850         new_page = alloc_huge_page(vma, address, outside_reserve);
2851
2852         if (IS_ERR(new_page)) {
2853                 /*
2854                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2855                  * it is due to references held by a child and an insufficient
2856                  * huge page pool. To guarantee the original mappers
2857                  * huge page pool. To guarantee the original mapper's
2858                  * may get SIGKILLed if it later faults.
2859                  */
2860                 if (outside_reserve) {
2861                         page_cache_release(old_page);
2862                         BUG_ON(huge_pte_none(pte));
2863                         unmap_ref_private(mm, vma, old_page, address);
2864                         BUG_ON(huge_pte_none(pte));
2865                         spin_lock(ptl);
2866                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2867                         if (likely(ptep &&
2868                                    pte_same(huge_ptep_get(ptep), pte)))
2869                                 goto retry_avoidcopy;
2870                         /*
2871                          * A race occurred while re-acquiring the page table
2872                          * lock; our job is done.
2873                          */
2874                         return 0;
2875                 }
2876
2877                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
2878                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
2879                 goto out_release_old;
2880         }
2881
2882         /*
2883          * When the original hugepage is a shared one, it does not have
2884          * an anon_vma prepared.
2885          */
2886         if (unlikely(anon_vma_prepare(vma))) {
2887                 ret = VM_FAULT_OOM;
2888                 goto out_release_all;
2889         }
2890
2891         copy_user_huge_page(new_page, old_page, address, vma,
2892                             pages_per_huge_page(h));
2893         __SetPageUptodate(new_page);
2894
2895         mmun_start = address & huge_page_mask(h);
2896         mmun_end = mmun_start + huge_page_size(h);
2897         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2898
2899         /*
2900          * Retake the page table lock to check for racing updates
2901          * before the page tables are altered
2902          */
2903         spin_lock(ptl);
2904         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2905         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
2906                 ClearPagePrivate(new_page);
2907
2908                 /* Break COW */
2909                 huge_ptep_clear_flush(vma, address, ptep);
2910                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2911                 set_huge_pte_at(mm, address, ptep,
2912                                 make_huge_pte(vma, new_page, 1));
2913                 page_remove_rmap(old_page);
2914                 hugepage_add_new_anon_rmap(new_page, vma, address);
2915                 /* Make the old page be freed below */
2916                 new_page = old_page;
2917         }
2918         spin_unlock(ptl);
2919         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2920 out_release_all:
2921         page_cache_release(new_page);
2922 out_release_old:
2923         page_cache_release(old_page);
2924
2925         spin_lock(ptl); /* Caller expects lock to be held */
2926         return ret;
2927 }
2928
2929 /* Return the pagecache page at a given address within a VMA */
2930 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2931                         struct vm_area_struct *vma, unsigned long address)
2932 {
2933         struct address_space *mapping;
2934         pgoff_t idx;
2935
2936         mapping = vma->vm_file->f_mapping;
2937         idx = vma_hugecache_offset(h, vma, address);
2938
2939         return find_lock_page(mapping, idx);
2940 }
2941
2942 /*
2943  * Return whether there is a pagecache page to back given address within VMA.
2944  * The caller, follow_hugetlb_page(), holds page_table_lock, so we cannot lock_page().
2945  */
2946 static bool hugetlbfs_pagecache_present(struct hstate *h,
2947                         struct vm_area_struct *vma, unsigned long address)
2948 {
2949         struct address_space *mapping;
2950         pgoff_t idx;
2951         struct page *page;
2952
2953         mapping = vma->vm_file->f_mapping;
2954         idx = vma_hugecache_offset(h, vma, address);
2955
2956         page = find_get_page(mapping, idx);
2957         if (page)
2958                 put_page(page);
2959         return page != NULL;
2960 }
2961
2962 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2963                            struct address_space *mapping, pgoff_t idx,
2964                            unsigned long address, pte_t *ptep, unsigned int flags)
2965 {
2966         struct hstate *h = hstate_vma(vma);
2967         int ret = VM_FAULT_SIGBUS;
2968         int anon_rmap = 0;
2969         unsigned long size;
2970         struct page *page;
2971         pte_t new_pte;
2972         spinlock_t *ptl;
2973
2974         /*
2975          * Currently, we are forced to kill the process in the event the
2976          * original mapper has unmapped pages from the child due to a failed
2977          * COW. Warn that such a situation has occurred, as it may not be obvious.
2978          */
2979         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2980                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
2981                            current->pid);
2982                 return ret;
2983         }
2984
2985         /*
2986          * Use page lock to guard against racing truncation
2987          * before we get page_table_lock.
2988          */
2989 retry:
2990         page = find_lock_page(mapping, idx);
2991         if (!page) {
2992                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2993                 if (idx >= size)
2994                         goto out;
2995                 page = alloc_huge_page(vma, address, 0);
2996                 if (IS_ERR(page)) {
2997                         ret = PTR_ERR(page);
2998                         if (ret == -ENOMEM)
2999                                 ret = VM_FAULT_OOM;
3000                         else
3001                                 ret = VM_FAULT_SIGBUS;
3002                         goto out;
3003                 }
3004                 clear_huge_page(page, address, pages_per_huge_page(h));
3005                 __SetPageUptodate(page);
3006
3007                 if (vma->vm_flags & VM_MAYSHARE) {
3008                         int err;
3009                         struct inode *inode = mapping->host;
3010
3011                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3012                         if (err) {
3013                                 put_page(page);
3014                                 if (err == -EEXIST)
3015                                         goto retry;
3016                                 goto out;
3017                         }
3018                         ClearPagePrivate(page);
3019
3020                         spin_lock(&inode->i_lock);
3021                         inode->i_blocks += blocks_per_huge_page(h);
3022                         spin_unlock(&inode->i_lock);
3023                 } else {
3024                         lock_page(page);
3025                         if (unlikely(anon_vma_prepare(vma))) {
3026                                 ret = VM_FAULT_OOM;
3027                                 goto backout_unlocked;
3028                         }
3029                         anon_rmap = 1;
3030                 }
3031         } else {
3032                 /*
3033                  * If a memory error occurs between mmap() and fault, a process
3034                  * may not have a hwpoisoned swap entry for the errored virtual
3035                  * address, so we must block the hugepage fault with the PG_hwpoison check.
3036                  */
3037                 if (unlikely(PageHWPoison(page))) {
3038                         ret = VM_FAULT_HWPOISON |
3039                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3040                         goto backout_unlocked;
3041                 }
3042         }
3043
3044         /*
3045          * If we are going to COW a private mapping later, we examine the
3046          * pending reservations for this page now. This will ensure that
3047          * any allocations necessary to record that reservation occur outside
3048          * the spinlock.
3049          */
3050         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3051                 if (vma_needs_reservation(h, vma, address) < 0) {
3052                         ret = VM_FAULT_OOM;
3053                         goto backout_unlocked;
3054                 }
3055
3056         ptl = huge_pte_lockptr(h, mm, ptep);
3057         spin_lock(ptl);
3058         size = i_size_read(mapping->host) >> huge_page_shift(h);
3059         if (idx >= size)
3060                 goto backout;
3061
3062         ret = 0;
3063         if (!huge_pte_none(huge_ptep_get(ptep)))
3064                 goto backout;
3065
3066         if (anon_rmap) {
3067                 ClearPagePrivate(page);
3068                 hugepage_add_new_anon_rmap(page, vma, address);
3069         } else
3070                 page_dup_rmap(page);
3071         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3072                                 && (vma->vm_flags & VM_SHARED)));
3073         set_huge_pte_at(mm, address, ptep, new_pte);
3074
3075         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3076                 /* Optimization, do the COW without a second fault */
3077                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3078         }
3079
3080         spin_unlock(ptl);
3081         unlock_page(page);
3082 out:
3083         return ret;
3084
3085 backout:
3086         spin_unlock(ptl);
3087 backout_unlocked:
3088         unlock_page(page);
3089         put_page(page);
3090         goto out;
3091 }
3092
3093 #ifdef CONFIG_SMP
3094 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3095                             struct vm_area_struct *vma,
3096                             struct address_space *mapping,
3097                             pgoff_t idx, unsigned long address)
3098 {
3099         unsigned long key[2];
3100         u32 hash;
3101
3102         if (vma->vm_flags & VM_SHARED) {
3103                 key[0] = (unsigned long) mapping;
3104                 key[1] = idx;
3105         } else {
3106                 key[0] = (unsigned long) mm;
3107                 key[1] = address >> huge_page_shift(h);
3108         }
3109
3110         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3111
3112         return hash & (num_fault_mutexes - 1);
3113 }
3114 #else
3115 /*
3116  * For uniprocessor systems we always use a single mutex, so just
3117  * return 0 and avoid the hashing overhead.
3118  */
3119 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3120                             struct vm_area_struct *vma,
3121                             struct address_space *mapping,
3122                             pgoff_t idx, unsigned long address)
3123 {
3124         return 0;
3125 }
3126 #endif
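
/*
 * Editorial sketch: num_fault_mutexes is a power of two, so the
 * "hash & (num_fault_mutexes - 1)" above is a cheap modulo. Setting up
 * such a table (illustrative only; the real init lives elsewhere in
 * this file) would look roughly like:
 *
 *        num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
 *        htlb_fault_mutex_table = kmalloc(sizeof(struct mutex) *
 *                                         num_fault_mutexes, GFP_KERNEL);
 *        for (i = 0; i < num_fault_mutexes; i++)
 *                mutex_init(&htlb_fault_mutex_table[i]);
 *
 * Two faults on the same (mapping, index) pair then take the same mutex
 * and serialize; unrelated faults usually do not contend.
 */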
3127
3128 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3129                         unsigned long address, unsigned int flags)
3130 {
3131         pte_t *ptep, entry;
3132         spinlock_t *ptl;
3133         int ret;
3134         u32 hash;
3135         pgoff_t idx;
3136         struct page *page = NULL;
3137         struct page *pagecache_page = NULL;
3138         struct hstate *h = hstate_vma(vma);
3139         struct address_space *mapping;
3140         int need_wait_lock = 0;
3141
3142         address &= huge_page_mask(h);
3143
3144         ptep = huge_pte_offset(mm, address);
3145         if (ptep) {
3146                 entry = huge_ptep_get(ptep);
3147                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3148                         migration_entry_wait_huge(vma, mm, ptep);
3149                         return 0;
3150                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3151                         return VM_FAULT_HWPOISON_LARGE |
3152                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3153         }
3154
3155         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3156         if (!ptep)
3157                 return VM_FAULT_OOM;
3158
3159         mapping = vma->vm_file->f_mapping;
3160         idx = vma_hugecache_offset(h, vma, address);
3161
3162         /*
3163          * Serialize hugepage allocation and instantiation, so that we don't
3164          * get spurious allocation failures if two CPUs race to instantiate
3165          * the same page in the page cache.
3166          */
3167         hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3168         mutex_lock(&htlb_fault_mutex_table[hash]);
3169
3170         entry = huge_ptep_get(ptep);
3171         if (huge_pte_none(entry)) {
3172                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3173                 goto out_mutex;
3174         }
3175
3176         ret = 0;
3177
3178         /*
3179          * entry could be a migration/hwpoison entry at this point, so this
3180          * check prevents the kernel from going below assuming that we have
3181          * an active hugepage in pagecache. This goto defers to a second
3182          * page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3183          * checks will handle it properly.
3184          */
3185         if (!pte_present(entry))
3186                 goto out_mutex;
3187
3188         /*
3189          * If we are going to COW the mapping later, we examine the pending
3190          * reservations for this page now. This will ensure that any
3191          * allocations necessary to record that reservation occur outside the
3192          * spinlock. For private mappings, we also lookup the pagecache
3193          * page now as it is used to determine if a reservation has been
3194          * consumed.
3195          */
3196         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3197                 if (vma_needs_reservation(h, vma, address) < 0) {
3198                         ret = VM_FAULT_OOM;
3199                         goto out_mutex;
3200                 }
3201
3202                 if (!(vma->vm_flags & VM_MAYSHARE))
3203                         pagecache_page = hugetlbfs_pagecache_page(h,
3204                                                                 vma, address);
3205         }
3206
3207         ptl = huge_pte_lock(h, mm, ptep);
3208
3209         /* Check for a racing update before calling hugetlb_cow */
3210         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3211                 goto out_ptl;
3212
3213         /*
3214          * hugetlb_cow() requires page locks of pte_page(entry) and
3215          * pagecache_page, so here we need to take the former one
3216          * when page != pagecache_page or !pagecache_page.
3217          */
3218         page = pte_page(entry);
3219         if (page != pagecache_page)
3220                 if (!trylock_page(page)) {
3221                         need_wait_lock = 1;
3222                         goto out_ptl;
3223                 }
3224
3225         get_page(page);
3226
3227         if (flags & FAULT_FLAG_WRITE) {
3228                 if (!huge_pte_write(entry)) {
3229                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3230                                         pagecache_page, ptl);
3231                         goto out_put_page;
3232                 }
3233                 entry = huge_pte_mkdirty(entry);
3234         }
3235         entry = pte_mkyoung(entry);
3236         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3237                                                 flags & FAULT_FLAG_WRITE))
3238                 update_mmu_cache(vma, address, ptep);
3239 out_put_page:
3240         if (page != pagecache_page)
3241                 unlock_page(page);
3242         put_page(page);
3243 out_ptl:
3244         spin_unlock(ptl);
3245
3246         if (pagecache_page) {
3247                 unlock_page(pagecache_page);
3248                 put_page(pagecache_page);
3249         }
3250 out_mutex:
3251         mutex_unlock(&htlb_fault_mutex_table[hash]);
3252         /*
3253          * Generally it's safe to hold a refcount while waiting on a page
3254          * lock. But here we only wait to defer the next page fault and avoid
3255          * a busy loop; the page is not touched after being unlocked and
3256          * before the current page fault returns. So we are safe from
3257          * accessing a freed page, even if we wait here without a refcount.
3258          */
3259         if (need_wait_lock)
3260                 wait_on_page_locked(page);
3261         return ret;
3262 }
3263
3264 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3265                          struct page **pages, struct vm_area_struct **vmas,
3266                          unsigned long *position, unsigned long *nr_pages,
3267                          long i, unsigned int flags)
3268 {
3269         unsigned long pfn_offset;
3270         unsigned long vaddr = *position;
3271         unsigned long remainder = *nr_pages;
3272         struct hstate *h = hstate_vma(vma);
3273
3274         while (vaddr < vma->vm_end && remainder) {
3275                 pte_t *pte;
3276                 spinlock_t *ptl = NULL;
3277                 int absent;
3278                 struct page *page;
3279
3280                 /*
3281                  * Some archs (sparc64, sh*) have multiple pte_ts per
3282                  * hugepage.  We have to make sure we get the
3283                  * first, for the page indexing below to work.
3284                  *
3285                  * Note that page table lock is not held when pte is null.
3286                  */
3287                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3288                 if (pte)
3289                         ptl = huge_pte_lock(h, mm, pte);
3290                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3291
3292                 /*
3293                  * When coredumping, it suits get_dump_page if we just return
3294                  * an error where there's an empty slot with no huge pagecache
3295                  * to back it.  This way, we avoid allocating a hugepage, and
3296                  * the sparse dumpfile avoids allocating disk blocks, but its
3297                  * huge holes still show up with zeroes where they need to be.
3298                  */
3299                 if (absent && (flags & FOLL_DUMP) &&
3300                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3301                         if (pte)
3302                                 spin_unlock(ptl);
3303                         remainder = 0;
3304                         break;
3305                 }
3306
3307                 /*
3308                  * We need to call hugetlb_fault both for hugepages under
3309                  * migration (in which case hugetlb_fault waits for the
3310                  * migration) and for hwpoisoned hugepages (in which case we
3311                  * must prevent the caller from accessing them). To do this,
3312                  * we use is_swap_pte here instead of is_hugetlb_entry_migration
3313                  * and is_hugetlb_entry_hwpoisoned: it simply covers both
3314                  * cases, and we cannot follow correct pages directly from
3315                  * any kind of swap entry.
3316                  */
3317                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3318                     ((flags & FOLL_WRITE) &&
3319                       !huge_pte_write(huge_ptep_get(pte)))) {
3320                         int ret;
3321
3322                         if (pte)
3323                                 spin_unlock(ptl);
3324                         ret = hugetlb_fault(mm, vma, vaddr,
3325                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3326                         if (!(ret & VM_FAULT_ERROR))
3327                                 continue;
3328
3329                         remainder = 0;
3330                         break;
3331                 }
3332
3333                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3334                 page = pte_page(huge_ptep_get(pte));
3335 same_page:
3336                 if (pages) {
3337                         pages[i] = mem_map_offset(page, pfn_offset);
3338                         get_page_foll(pages[i]);
3339                 }
3340
3341                 if (vmas)
3342                         vmas[i] = vma;
3343
3344                 vaddr += PAGE_SIZE;
3345                 ++pfn_offset;
3346                 --remainder;
3347                 ++i;
3348                 if (vaddr < vma->vm_end && remainder &&
3349                                 pfn_offset < pages_per_huge_page(h)) {
3350                         /*
3351                          * We use pfn_offset to avoid touching the pageframes
3352                          * of this compound page.
3353                          */
3354                         goto same_page;
3355                 }
3356                 spin_unlock(ptl);
3357         }
3358         *nr_pages = remainder;
3359         *position = vaddr;
3360
3361         return i ? i : -EFAULT;
3362 }
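
/*
 * Editorial worked example for the loop above, assuming x86_64 with
 * 2MB huge pages (PAGE_SHIFT == 12, pages_per_huge_page(h) == 512):
 *
 *        vaddr & ~huge_page_mask(h)      pfn_offset      subpage returned
 *        0x000000                        0               first
 *        0x001000                        1               second
 *        0x1ff000                        511             last
 *
 * The "same_page" loop hands out all 512 subpages of one huge page from
 * a single page table walk before looking up the next pte.
 */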
3363
3364 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3365                 unsigned long address, unsigned long end, pgprot_t newprot)
3366 {
3367         struct mm_struct *mm = vma->vm_mm;
3368         unsigned long start = address;
3369         pte_t *ptep;
3370         pte_t pte;
3371         struct hstate *h = hstate_vma(vma);
3372         unsigned long pages = 0;
3373
3374         BUG_ON(address >= end);
3375         flush_cache_range(vma, address, end);
3376
3377         mmu_notifier_invalidate_range_start(mm, start, end);
3378         i_mmap_lock_write(vma->vm_file->f_mapping);
3379         for (; address < end; address += huge_page_size(h)) {
3380                 spinlock_t *ptl;
3381                 ptep = huge_pte_offset(mm, address);
3382                 if (!ptep)
3383                         continue;
3384                 ptl = huge_pte_lock(h, mm, ptep);
3385                 if (huge_pmd_unshare(mm, &address, ptep)) {
3386                         pages++;
3387                         spin_unlock(ptl);
3388                         continue;
3389                 }
3390                 pte = huge_ptep_get(ptep);
3391                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3392                         spin_unlock(ptl);
3393                         continue;
3394                 }
3395                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3396                         swp_entry_t entry = pte_to_swp_entry(pte);
3397
3398                         if (is_write_migration_entry(entry)) {
3399                                 pte_t newpte;
3400
3401                                 make_migration_entry_read(&entry);
3402                                 newpte = swp_entry_to_pte(entry);
3403                                 set_huge_pte_at(mm, address, ptep, newpte);
3404                                 pages++;
3405                         }
3406                         spin_unlock(ptl);
3407                         continue;
3408                 }
3409                 if (!huge_pte_none(pte)) {
3410                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3411                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3412                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3413                         set_huge_pte_at(mm, address, ptep, pte);
3414                         pages++;
3415                 }
3416                 spin_unlock(ptl);
3417         }
3418         /*
3419          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3420          * may have cleared our pud entry and done put_page on the page table:
3421          * once we release i_mmap_rwsem, another task can do the final put_page
3422          * and that page table be reused and filled with junk.
3423          */
3424         flush_tlb_range(vma, start, end);
3425         mmu_notifier_invalidate_range(mm, start, end);
3426         i_mmap_unlock_write(vma->vm_file->f_mapping);
3427         mmu_notifier_invalidate_range_end(mm, start, end);
3428
3429         return pages << h->order;
3430 }
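
/*
 * Editorial note: the value returned above is in base pages, not huge
 * pages. With 2MB huge pages on x86_64 (h->order == 9), changing the
 * protection of 3 huge ptes reports 3 << 9 == 1536 pages, comparable to
 * what change_protection() reports for normal-sized mappings.
 */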
3431
3432 int hugetlb_reserve_pages(struct inode *inode,
3433                                         long from, long to,
3434                                         struct vm_area_struct *vma,
3435                                         vm_flags_t vm_flags)
3436 {
3437         long ret, chg;
3438         struct hstate *h = hstate_inode(inode);
3439         struct hugepage_subpool *spool = subpool_inode(inode);
3440         struct resv_map *resv_map;
3441
3442         /*
3443          * Only apply hugepage reservation if asked. At fault time, an
3444          * attempt will be made for VM_NORESERVE to allocate a page
3445          * without using reserves.
3446          */
3447         if (vm_flags & VM_NORESERVE)
3448                 return 0;
3449
3450         /*
3451          * Shared mappings base their reservation on the number of pages that
3452          * are already allocated on behalf of the file. Private mappings need
3453          * to reserve the full area even if read-only as mprotect() may be
3454          * called to make the mapping read-write. Assume !vma is a shm mapping.
3455          */
3456         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3457                 resv_map = inode_resv_map(inode);
3458
3459                 chg = region_chg(resv_map, from, to);
3460
3461         } else {
3462                 resv_map = resv_map_alloc();
3463                 if (!resv_map)
3464                         return -ENOMEM;
3465
3466                 chg = to - from;
3467
3468                 set_vma_resv_map(vma, resv_map);
3469                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3470         }
3471
3472         if (chg < 0) {
3473                 ret = chg;
3474                 goto out_err;
3475         }
3476
3477         /* There must be enough pages in the subpool for the mapping */
3478         if (hugepage_subpool_get_pages(spool, chg)) {
3479                 ret = -ENOSPC;
3480                 goto out_err;
3481         }
3482
3483         /*
3484          * Check that enough hugepages are available for the reservation.
3485          * Hand the pages back to the subpool if there are not.
3486          */
3487         ret = hugetlb_acct_memory(h, chg);
3488         if (ret < 0) {
3489                 hugepage_subpool_put_pages(spool, chg);
3490                 goto out_err;
3491         }
3492
3493         /*
3494          * Account for the reservations made. Shared mappings record regions
3495          * that have reservations as they are shared by multiple VMAs.
3496          * When the last VMA disappears, the region map says how much
3497          * the reservation was and the page cache tells how much of
3498          * the reservation was consumed. Private mappings are per-VMA and
3499          * only the consumed reservations are tracked. When the VMA
3500          * disappears, the original reservation is the VMA size and the
3501          * consumed reservations are stored in the map. Hence, nothing
3502          * else has to be done for private mappings here.
3503          */
3504         if (!vma || vma->vm_flags & VM_MAYSHARE)
3505                 region_add(resv_map, from, to);
3506         return 0;
3507 out_err:
3508         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3509                 kref_put(&resv_map->refs, resv_map_release);
3510         return ret;
3511 }
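
/*
 * Editorial worked example of the accounting above: a MAP_SHARED
 * mapping over file pages [0, 4) where [1, 2) is already tracked in the
 * resv_map gets region_chg() == 3, so only three huge pages are charged
 * to the subpool and to hugetlb_acct_memory(). A MAP_PRIVATE mapping of
 * the same range always charges the full chg == to - from == 4, since
 * private reservations are per-VMA and never shared.
 */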
3512
3513 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3514 {
3515         struct hstate *h = hstate_inode(inode);
3516         struct resv_map *resv_map = inode_resv_map(inode);
3517         long chg = 0;
3518         struct hugepage_subpool *spool = subpool_inode(inode);
3519
3520         if (resv_map)
3521                 chg = region_truncate(resv_map, offset);
3522         spin_lock(&inode->i_lock);
3523         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3524         spin_unlock(&inode->i_lock);
3525
3526         hugepage_subpool_put_pages(spool, (chg - freed));
3527         hugetlb_acct_memory(h, -(chg - freed));
3528 }
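
/*
 * Editorial example of the arithmetic above: if truncation removes
 * chg == 10 reserved huge pages from the resv_map but only freed == 4
 * of them were actually instantiated, the 10 - 4 == 6 never-used
 * reservations are handed back to the subpool and uncharged via
 * hugetlb_acct_memory().
 */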
3529
3530 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3531 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3532                                 struct vm_area_struct *vma,
3533                                 unsigned long addr, pgoff_t idx)
3534 {
3535         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3536                                 svma->vm_start;
3537         unsigned long sbase = saddr & PUD_MASK;
3538         unsigned long s_end = sbase + PUD_SIZE;
3539
3540         /* Allow segments to share even if only one is marked locked */
3541         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3542         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3543
3544         /*
3545          * match the virtual addresses, permissions and the alignment of the
3546          * page table page.
3547          */
3548         if (pmd_index(addr) != pmd_index(saddr) ||
3549             vm_flags != svm_flags ||
3550             sbase < svma->vm_start || svma->vm_end < s_end)
3551                 return 0;
3552
3553         return saddr;
3554 }
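
/*
 * Editorial example, assuming x86_64 where PUD_SIZE is 1GB: if the
 * shared counterpart of addr in svma is saddr == 0x40200000, then
 * sbase == 0x40000000 and s_end == 0x80000000, and sharing is allowed
 * only when svma spans that entire 1GB range with identical flags;
 * that is the granularity at which one pmd page table page can serve
 * two VMAs.
 */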
3555
3556 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3557 {
3558         unsigned long base = addr & PUD_MASK;
3559         unsigned long end = base + PUD_SIZE;
3560
3561         /*
3562          * check on proper vm_flags and page table alignment
3563          */
3564         if (vma->vm_flags & VM_MAYSHARE &&
3565             vma->vm_start <= base && end <= vma->vm_end)
3566                 return 1;
3567         return 0;
3568 }
3569
3570 /*
3571  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
3572  * and returns the corresponding pte. While this is not necessary for the
3573  * !shared pmd case because we can allocate the pmd later as well, it makes the
3574  * code much cleaner. pmd allocation is essential for the shared case because
3575  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3576  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3577  * bad pmd for sharing.
3578  */
3579 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3580 {
3581         struct vm_area_struct *vma = find_vma(mm, addr);
3582         struct address_space *mapping = vma->vm_file->f_mapping;
3583         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3584                         vma->vm_pgoff;
3585         struct vm_area_struct *svma;
3586         unsigned long saddr;
3587         pte_t *spte = NULL;
3588         pte_t *pte;
3589         spinlock_t *ptl;
3590
3591         if (!vma_shareable(vma, addr))
3592                 return (pte_t *)pmd_alloc(mm, pud, addr);
3593
3594         i_mmap_lock_write(mapping);
3595         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3596                 if (svma == vma)
3597                         continue;
3598
3599                 saddr = page_table_shareable(svma, vma, addr, idx);
3600                 if (saddr) {
3601                         spte = huge_pte_offset(svma->vm_mm, saddr);
3602                         if (spte) {
3603                                 mm_inc_nr_pmds(mm);
3604                                 get_page(virt_to_page(spte));
3605                                 break;
3606                         }
3607                 }
3608         }
3609
3610         if (!spte)
3611                 goto out;
3612
3613         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3614         spin_lock(ptl);
3615         if (pud_none(*pud)) {
3616                 pud_populate(mm, pud,
3617                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3618         } else {
3619                 put_page(virt_to_page(spte));
3620                 mm_inc_nr_pmds(mm);
3621         }
3622         spin_unlock(ptl);
3623 out:
3624         pte = (pte_t *)pmd_alloc(mm, pud, addr);
3625         i_mmap_unlock_write(mapping);
3626         return pte;
3627 }
3628
3629 /*
3630  * Unmap a huge page backed by a shared pte.
3631  *
3632  * The hugetlb pte page is refcounted at the time of mapping. If the pte is
3633  * shared (page_count > 1), unmap is achieved by clearing the pud and
3634  * decrementing the refcount. If count == 1, the pte page is not shared.
3635  *
3636  * Called with the page table lock held.
3637  *
3638  * returns: 1 successfully unmapped a shared pte page
3639  *          0 the underlying pte page is not shared, or it is the last user
3640  */
3641 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3642 {
3643         pgd_t *pgd = pgd_offset(mm, *addr);
3644         pud_t *pud = pud_offset(pgd, *addr);
3645
3646         BUG_ON(page_count(virt_to_page(ptep)) == 0);
3647         if (page_count(virt_to_page(ptep)) == 1)
3648                 return 0;
3649
3650         pud_clear(pud);
3651         put_page(virt_to_page(ptep));
3652         mm_dec_nr_pmds(mm);
3653         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3654         return 1;
3655 }
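
/*
 * Editorial note on the *addr update above: one shared pmd page maps
 * HPAGE_SIZE * PTRS_PER_PTE bytes (1GB with 2MB huge pages on x86_64),
 * and unsharing detaches that whole range at once. Rewinding *addr to
 * the end of the range minus one huge page makes a caller stepping by
 * huge_page_size(h) resume just past the region it can no longer walk
 * pte by pte.
 */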
3656 #define want_pmd_share()        (1)
3657 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3658 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3659 {
3660         return NULL;
3661 }
3662 #define want_pmd_share()        (0)
3663 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3664
3665 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3666 pte_t *huge_pte_alloc(struct mm_struct *mm,
3667                         unsigned long addr, unsigned long sz)
3668 {
3669         pgd_t *pgd;
3670         pud_t *pud;
3671         pte_t *pte = NULL;
3672
3673         pgd = pgd_offset(mm, addr);
3674         pud = pud_alloc(mm, pgd, addr);
3675         if (pud) {
3676                 if (sz == PUD_SIZE) {
3677                         pte = (pte_t *)pud;
3678                 } else {
3679                         BUG_ON(sz != PMD_SIZE);
3680                         if (want_pmd_share() && pud_none(*pud))
3681                                 pte = huge_pmd_share(mm, addr, pud);
3682                         else
3683                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3684                 }
3685         }
3686         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3687
3688         return pte;
3689 }
3690
3691 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3692 {
3693         pgd_t *pgd;
3694         pud_t *pud;
3695         pmd_t *pmd = NULL;
3696
3697         pgd = pgd_offset(mm, addr);
3698         if (pgd_present(*pgd)) {
3699                 pud = pud_offset(pgd, addr);
3700                 if (pud_present(*pud)) {
3701                         if (pud_huge(*pud))
3702                                 return (pte_t *)pud;
3703                         pmd = pmd_offset(pud, addr);
3704                 }
3705         }
3706         return (pte_t *) pmd;
3707 }
3708
3709 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
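
/*
 * Editorial usage sketch for the two helpers above, mirroring the
 * pattern in hugetlb_fault() and follow_hugetlb_page(): walk first,
 * then lock and re-read, since the entry may change until the page
 * table lock is held:
 *
 *        pte_t *ptep = huge_pte_offset(mm, addr & huge_page_mask(h));
 *        if (ptep) {
 *                spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *                pte_t pte = huge_ptep_get(ptep);
 *                ... act on pte while the lock is held ...
 *                spin_unlock(ptl);
 *        }
 */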
3710
3711 /*
3712  * These functions can be overridden if your architecture needs its own
3713  * behavior.
3714  */
3715 struct page * __weak
3716 follow_huge_addr(struct mm_struct *mm, unsigned long address,
3717                               int write)
3718 {
3719         return ERR_PTR(-EINVAL);
3720 }
3721
3722 struct page * __weak
3723 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3724                 pmd_t *pmd, int flags)
3725 {
3726         struct page *page = NULL;
3727         spinlock_t *ptl;
3728 retry:
3729         ptl = pmd_lockptr(mm, pmd);
3730         spin_lock(ptl);
3731         /*
3732          * make sure that the address range covered by this pmd is not
3733          * unmapped by other threads.
3734          */
3735         if (!pmd_huge(*pmd))
3736                 goto out;
3737         if (pmd_present(*pmd)) {
3738                 page = pte_page(*(pte_t *)pmd) +
3739                         ((address & ~PMD_MASK) >> PAGE_SHIFT);
3740                 if (flags & FOLL_GET)
3741                         get_page(page);
3742         } else {
3743                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
3744                         spin_unlock(ptl);
3745                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
3746                         goto retry;
3747                 }
3748                 /*
3749                  * hwpoisoned entry is treated as no_page_table in
3750                  * follow_page_mask().
3751                  */
3752         }
3753 out:
3754         spin_unlock(ptl);
3755         return page;
3756 }
3757
3758 struct page * __weak
3759 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3760                 pud_t *pud, int flags)
3761 {
3762         if (flags & FOLL_GET)
3763                 return NULL;
3764
3765         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
3766 }
3767
3768 #ifdef CONFIG_MEMORY_FAILURE
3769
3770 /* Should be called in hugetlb_lock */
3771 static int is_hugepage_on_freelist(struct page *hpage)
3772 {
3773         struct page *page;
3774         struct page *tmp;
3775         struct hstate *h = page_hstate(hpage);
3776         int nid = page_to_nid(hpage);
3777
3778         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3779                 if (page == hpage)
3780                         return 1;
3781         return 0;
3782 }
3783
3784 /*
3785  * This function is called from memory failure code.
3786  * Assume the caller holds the page lock of the head page.
3787  */
3788 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3789 {
3790         struct hstate *h = page_hstate(hpage);
3791         int nid = page_to_nid(hpage);
3792         int ret = -EBUSY;
3793
3794         spin_lock(&hugetlb_lock);
3795         if (is_hugepage_on_freelist(hpage)) {
3796                 /*
3797                  * Hwpoisoned hugepage isn't linked to activelist or freelist,
3798                  * but dangling hpage->lru can trigger list-debug warnings
3799                  * (this happens when we call unpoison_memory() on it),
3800                  * so let it point to itself with list_del_init().
3801                  */
3802                 list_del_init(&hpage->lru);
3803                 set_page_refcounted(hpage);
3804                 h->free_huge_pages--;
3805                 h->free_huge_pages_node[nid]--;
3806                 ret = 0;
3807         }
3808         spin_unlock(&hugetlb_lock);
3809         return ret;
3810 }
3811 #endif
3812
3813 bool isolate_huge_page(struct page *page, struct list_head *list)
3814 {
3815         VM_BUG_ON_PAGE(!PageHead(page), page);
3816         if (!get_page_unless_zero(page))
3817                 return false;
3818         spin_lock(&hugetlb_lock);
3819         list_move_tail(&page->lru, list);
3820         spin_unlock(&hugetlb_lock);
3821         return true;
3822 }
3823
3824 void putback_active_hugepage(struct page *page)
3825 {
3826         VM_BUG_ON_PAGE(!PageHead(page), page);
3827         spin_lock(&hugetlb_lock);
3828         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3829         spin_unlock(&hugetlb_lock);
3830         put_page(page);
3831 }
3832
3833 bool is_hugepage_active(struct page *page)
3834 {
3835         VM_BUG_ON_PAGE(!PageHuge(page), page);
3836         /*
3837          * This function can be called for a tail page because the caller,
3838          * scan_movable_pages, scans through a given pfn-range which typically
3839          * covers one memory block. In systems using gigantic hugepages (1GB
3840          * on x86_64), a hugepage is larger than a memory block, and we don't
3841          * support migrating such large hugepages for now, so return false
3842          * when called for tail pages.
3843          */
3844         if (PageTail(page))
3845                 return false;
3846         /*
3847          * The refcount of a hwpoisoned hugepage is 1, but it is not active,
3848          * so we should return false for it.
3849          */
3850         if (unlikely(PageHWPoison(page)))
3851                 return false;
3852         return page_count(page) > 0;
3853 }