/freebsd-current/contrib/mandoc/
dba_read.c
    45  int32_t im, ip, iv, npages;                          (local)
    49  npages = dbm_page_count();
    50  dba = dba_new(npages < 128 ? 128 : npages);
    51  for (ip = 0; ip < npages; ip++) {
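Note: dba_new() is pre-sized to at least 128 slots so a nearly empty database
does not trigger repeated growth of the array. A minimal standalone sketch of
that clamp-to-minimum idiom (MIN_SLOTS and the table are illustrative names,
not mandoc's own):

    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_SLOTS 128	/* floor, as in dba_new(npages < 128 ? 128 : npages) */

    int
    main(void)
    {
    	int npages = 17;	/* e.g. a small page count from the database */
    	int slots = npages < MIN_SLOTS ? MIN_SLOTS : npages;
    	void **tab;

    	/* Reserve at least MIN_SLOTS entries so early insertions never grow the table. */
    	tab = calloc(slots, sizeof(*tab));
    	if (tab == NULL)
    		return 1;
    	printf("reserved %d slots for %d pages\n", slots, npages);
    	free(tab);
    	return 0;
    }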
dbm.c
    69  static int32_t npages;                               (variable)
    94  if ((npages = be32toh(*dbm_getint(4))) < 0) {
    96          fname, npages);
   139  return npages;
   151  assert(ip < npages);
   265  ip = npages;
   273  while (ip < npages) {
   289  if (ip == npages) {
   303  if (++ip < npages) {
   331  for ( ; ip < npages; i [all...]
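Note: the page count sits in the on-disk database header in big-endian byte
order, and a negative decoded value marks the file as corrupt or foreign. A
self-contained sketch of the decode-and-validate step, assuming a raw 4-byte
header field (be32toh() is in <sys/endian.h> on FreeBSD, <endian.h> on glibc):

    #include <sys/endian.h>	/* be32toh(); use <endian.h> on glibc */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
    	/* Four raw header bytes as they would sit in the mapped file. */
    	const unsigned char raw[4] = { 0x00, 0x00, 0x01, 0x2c };	/* 300 */
    	uint32_t be;
    	int32_t npages;

    	memcpy(&be, raw, sizeof(be));
    	npages = (int32_t)be32toh(be);
    	if (npages < 0) {
    		fprintf(stderr, "corrupt database: %d pages\n", (int)npages);
    		return 1;
    	}
    	printf("%d pages\n", (int)npages);
    	return 0;
    }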
/freebsd-current/sys/dev/mlx5/mlx5_core/ |
mlx5_pagealloc.c
    40  s32 npages;                                          (member of struct mlx5_pages_req)
   298  s32 *npages, int boot)
   313  *npages = MLX5_GET(query_pages_out, out, num_pages);
   319  static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,   (argument)
   329  inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
   337  for (i = 0; i < npages; i++) {
   347  MLX5_SET(manage_pages_in, in, input_num_entries, npages);
   351  mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
   352          func_id, npages, err);
   355  dev->priv.fw_pages += npages;
   297  mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot)   (argument)
   391  u32 npages;                                          (local)
   416  reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed)   (argument)
   478  mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages)   (argument)
   524  s64 npages = 0;                                      (local)
   [all...]
mlx5_alloc.c
    77  buf->npages = howmany(size, PAGE_SIZE);
    81  buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list),
    91          PAGE_SIZE * buf->npages,    /* maxsize */
    92          buf->npages,                /* nsegments */
   111          PAGE_SIZE * buf->npages, &mlx5_buf_load_mem_cb,
   125  memset(buf->direct.buf, 0, PAGE_SIZE * buf->npages);
   251  for (i = 0; i != buf->npages; i++)
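Note: howmany(size, PAGE_SIZE) is the round-up-division macro from
<sys/param.h>: the number of pages needed to cover size bytes, with a trailing
partial page counted as a whole one. A quick demonstration (page size
hardcoded for portability):

    #include <sys/param.h>	/* howmany() on BSD and glibc */
    #include <stdio.h>

    #ifndef howmany
    #define howmany(x, y)	(((x) + ((y) - 1)) / (y))	/* fallback */
    #endif

    int
    main(void)
    {
    	const size_t page = 4096;
    	size_t sizes[] = { 1, 4096, 4097, 12288 };

    	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
    		printf("%zu bytes -> %zu pages\n",
    		    sizes[i], howmany(sizes[i], page));
    	return 0;	/* prints 1, 1, 2, 3 pages */
    }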
/freebsd-current/lib/libgeom/ |
geom_stats.c
    48  static uint npages, spp;                             (variable)
    57  munmap(statp, npages * pagesize);
    76  munmap(statp, npages * pagesize);
    82  npages = mediasize / pagesize;
   108  npages = 1;
   132  sp->ptr = malloc(pagesize * npages);
   137  explicit_bzero(sp->ptr, pagesize * npages);          /* page in, cache */
   139  memcpy(sp->ptr, statp, pagesize * npages);
   140  sp->pages = npages;
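Note: the snapshot path allocates a private buffer, zeroes it first so its
pages are resident, then copies the shared region in one memcpy, giving the
caller a stable copy of live, changing statistics. A userspace analogue, with
a plain allocation standing in for the mmap'd /dev/devstat region (that
substitution is the only liberty taken):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
    	size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
    	size_t npages = 2;
    	char *statp, *snap;

    	/* Stand-in for the mmap'd, live statistics region. */
    	statp = calloc(npages, pagesize);
    	snap = malloc(pagesize * npages);
    	if (statp == NULL || snap == NULL)
    		return 1;

    	memset(snap, 0, pagesize * npages);	/* fault the pages in first */
    	memcpy(snap, statp, pagesize * npages);	/* one-shot private snapshot */

    	printf("snapshotted %zu bytes\n", pagesize * npages);
    	free(snap);
    	free(statp);
    	return 0;
    }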
/freebsd-current/contrib/netbsd-tests/sys/uvm/ |
t_uvm_physseg.c
   281  uvmexp.npages = 0;
   487  ATF_REQUIRE_EQ(0, uvmexp.npages);
   492  ATF_REQUIRE_EQ(0, uvmexp.npages);
   501          , uvmexp.npages);
   510          + npages3, uvmexp.npages);
   554  ATF_REQUIRE_EQ(0, uvmexp.npages);                    /* Boot time sanity */
   579  ATF_CHECK_EQ(atop(FIVEONETWO_KILO), uvmexp.npages);
   648  ATF_REQUIRE_EQ(0, uvmexp.npages);
   661  ATF_REQUIRE_EQ(0, uvmexp.npages);
   692  ATF_REQUIRE_EQ(0, uvmexp.npages);
   776  const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX;   /* Number of pages */  (local)
   831  const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX;   /* Number of pages */  (local)
   869  const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX;   /* Number of pages */  (local)
  1493  psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);   (local)
  1642  psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);   (local)
  1699  psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);   (local)
  1743  psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);   (local)
  1923  psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);   (local)
  2028  psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);   (local)
  2200  psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);   (local)
  [all...]
/freebsd-current/sys/dev/drm2/ttm/ |
ttm_page_alloc.c
    63   * @npages: Number of pages in pool.
    71  unsigned npages;                                     (member of struct ttm_page_pool)
   326  static void ttm_pages_put(vm_page_t *pages, unsigned npages)   (argument)
   331  if (set_pages_array_wb(pages, npages))
   332          printf("[TTM] Failed to set %d pages to wb!\n", npages);
   333  for (i = 0; i < npages; ++i)
   340  pool->npages -= freed_pages;
   436  total += _manager->pools[i].npages;
   617  && count > pool->npages) {
   635  pool->npages
   687  ttm_put_pages(vm_page_t *pages, unsigned npages, int flags, enum ttm_caching_state cstate)   (argument)
   730  ttm_get_pages(vm_page_t *pages, unsigned npages, int flags, enum ttm_caching_state cstate)   (argument)
   [all...]
/freebsd-current/sys/ofed/drivers/infiniband/core/ |
ib_umem.c
    62  for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
    93  unsigned long npages;                                (local)
   157  npages = ib_umem_num_pages(umem);
   161  locked = npages + current->mm->pinned_vm;
   165  if (npages == 0 || npages > UINT_MAX) {
   170  ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
   180  while (npages) {
   182          min_t(unsigned long, npages,
   189  umem->npages [all...]
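Note: before pinning, the code charges the request against the process: the
prospective total locked = npages + current->mm->pinned_vm is checked, and
zero-length or overflowing requests are refused. A userspace analogue of the
accounting check against RLIMIT_MEMLOCK (the pinned_vm counter here is a
hypothetical stand-in):

    #include <sys/resource.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	struct rlimit rl;
    	uintmax_t pinned_vm = 10;	/* hypothetical: pages already pinned */
    	uintmax_t npages = 32;		/* pages this request would pin */
    	uintmax_t pagesz = 4096;
    	uintmax_t limit;

    	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
    		return 1;

    	limit = (uintmax_t)rl.rlim_cur / pagesz;	/* lockable pages */
    	if (npages == 0 || pinned_vm + npages > limit) {
    		fprintf(stderr, "would exceed RLIMIT_MEMLOCK\n");
    		return 1;
    	}
    	printf("ok: %ju of %ju lockable pages used\n",
    	    pinned_vm + npages, limit);
    	return 0;
    }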
ib_core_uverbs.c
   198  if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
   218  for (i = 0; i < entry->npages; i++)
   292  u32 xa_first, xa_last, npages;                       (local)
   314  npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
   315  entry->npages = npages;
   318  for (i = min_pgoff, j = 0; (i + j) <= max_pgoff && j != npages; ) {
   331  if (j != npages)
/freebsd-current/sys/vm/ |
vm_reserv.h
    49  int domain, int req, vm_page_t mpred, u_long npages,
    58  bool vm_reserv_is_populated(vm_page_t m, int npages);
    61  vm_page_t vm_reserv_reclaim_contig(int domain, u_long npages,
vm_phys.h
    62  vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
    66  int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
    69  void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
    75          u_long npages, vm_paddr_t low, vm_paddr_t high);
    76  void vm_phys_free_contig(vm_page_t m, u_long npages);
vm_phys.c
   498  u_long npages;                                       (local)
   510  npages = 0;
   527  npages > VM_DMA32_NPAGES_THRESHOLD &&
   535  npages += atop(seg->end - seg->start);
   556  npages = 0;
   561  seg->first_page = &vm_page_array[npages];
   562  npages += atop(seg->end - seg->start);
   684   * Add the physical pages [m, m + npages) at the beginning of a power-of-two
   696  vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)   (argument)
   700  KASSERT(npages
   732  vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)   (argument)
   780  vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])   (argument)
  1176  vm_phys_enqueue_contig(vm_page_t m, u_long npages)   (argument)
  1219  vm_phys_free_contig(vm_page_t m, u_long npages)      (argument)
  1256  vm_phys_find_range(vm_page_t bounds[], int segind, int domain, u_long npages, vm_paddr_t low, vm_paddr_t high)   (argument)
  1351  vm_phys_find_freelist_contig(struct vm_freelist *fl, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)   (argument)
  1424  vm_phys_find_queues_contig(struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX], u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)   (argument)
  1483  vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)   (argument)
  [all...]
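Note: atop() ("address to page") is a right shift by PAGE_SHIFT, so the loops
above count pages by summing atop(seg->end - seg->start) over all physical
segments. The arithmetic, spelled out standalone (segment bounds invented for
the example):

    #include <stdio.h>

    #define PAGE_SHIFT	12				/* 4 KB pages, as on amd64 */
    #define atop(x)	((unsigned long)(x) >> PAGE_SHIFT)

    struct seg { unsigned long start, end; };		/* page-aligned byte bounds */

    int
    main(void)
    {
    	struct seg segs[] = {
    		{ 0x00100000UL, 0x00400000UL },		/* 3 MB -> 768 pages */
    		{ 0x01000000UL, 0x01200000UL },		/* 2 MB -> 512 pages */
    	};
    	unsigned long npages = 0;

    	for (size_t i = 0; i < sizeof(segs) / sizeof(segs[0]); i++)
    		npages += atop(segs[i].end - segs[i].start);
    	printf("%lu pages total\n", npages);		/* 1280 */
    	return 0;
    }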
sg_pager.c
    76  vm_pindex_t npages, pindex;                          (local)
    89  npages = 0;
    95  npages += sg->sg_segs[i].ss_len / PAGE_SIZE;
   104  if (pindex > npages || pindex < OFF_TO_IDX(foff) ||
   116  object = vm_object_allocate(OBJT_SG, npages);
/freebsd-current/sys/dev/mlx4/mlx4_core/ |
mlx4_icm.c
    60  pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
    63  for (i = 0; i < chunk->npages; ++i)
    72  for (i = 0; i < chunk->npages; ++i)
   128  struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,   (argument)
   154  while (npages > 0) {
   169  chunk->npages = 0;
   174  while (1 << cur_order > npages)
   179          &chunk->mem[chunk->npages],
   182  ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
   193  ++chunk->npages;
   [all...]
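Note: mlx4_alloc_icm() allocates ICM in power-of-two chunks, lowering
cur_order whenever 1 << cur_order would overshoot the pages still needed, and
again when a high-order allocation fails, so it degrades gracefully under
memory fragmentation. A sketch of that order-reduction loop with a toy
allocator in place of the real page allocator:

    #include <stdio.h>

    /* Toy allocator: pretend orders above 2 (4 pages) always fail. */
    static int
    toy_alloc(int order)
    {
    	return order <= 2 ? 0 : -1;
    }

    int
    main(void)
    {
    	int npages = 21;	/* pages still to allocate */
    	int cur_order = 4;	/* start optimistic: 16-page chunks */

    	while (npages > 0) {
    		while (1 << cur_order > npages)	/* don't overshoot */
    			--cur_order;
    		if (toy_alloc(cur_order) != 0) {
    			if (--cur_order < 0)	/* even single pages fail */
    				return 1;
    			continue;		/* retry at the lower order */
    		}
    		printf("allocated %d-page chunk\n", 1 << cur_order);
    		npages -= 1 << cur_order;
    	}
    	return 0;	/* five 4-page chunks, then one 1-page chunk */
    }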
mlx4_mr.c
   200  int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,   (argument)
   205  if (!npages) {
   212  for (mtt->order = 0, i = 1; i < npages; i <<= 1)
   423  u64 iova, u64 size, u32 access, int npages,
   433  return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
   533  int npages, int page_shift, struct mlx4_mr *mr)
   543  access, npages, page_shift, mr);
   595  u64 iova, u64 size, int npages,
   600  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
   698  int start_index, int npages, u6
   422  mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr)   (argument)
   532  mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr)   (argument)
   594  mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct mlx4_mpt_entry *mpt_entry)   (argument)
   697  mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)   (argument)
   723  __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)   (argument)
   751  mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)   (argument)
   975  mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova)   (argument)
  1002  mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey)   (argument)
  [all...]
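Note: the loop at line 212 computes the MTT order as the ceiling of
log2(npages) by doubling i and counting the doublings. A standalone check of
that computation (test values arbitrary):

    #include <stdio.h>

    /* Smallest order such that (1 << order) >= npages, as in mlx4_mtt_init(). */
    static int
    mtt_order(int npages)
    {
    	int order, i;

    	for (order = 0, i = 1; i < npages; i <<= 1)
    		++order;
    	return order;
    }

    int
    main(void)
    {
    	int tests[] = { 1, 2, 3, 8, 9, 1000 };

    	for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
    		printf("npages=%4d -> order %d\n",
    		    tests[i], mtt_order(tests[i]));
    	return 0;	/* orders: 0, 1, 2, 3, 4, 10 */
    }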
/freebsd-current/sys/dev/gve/ |
gve_qpl.c
    97  gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)   (argument)
   103  if (npages + priv->num_registered_pages > priv->max_registered_pages) {
   105          (uintmax_t)npages + priv->num_registered_pages,
   114  qpl->dmas = malloc(npages * sizeof(*qpl->dmas), M_GVE_QPL,
   117  qpl->pages = malloc(npages * sizeof(*qpl->pages), M_GVE_QPL,
   122  qpl->kva = kva_alloc(PAGE_SIZE * npages);
   130  for (i = 0; i < npages; i++) {
   151  pmap_qenter(qpl->kva, qpl->pages, npages);
   153  for (i = 0; i < npages; i++) {
/freebsd-current/lib/libusbhid/ |
usage.c
    54  static int npages, npagesmax;                        (variable)
    62  for (i = 0; i < npages; i++) {
   125  if (npages >= npagesmax) {
   139  curpage = &pages[npages++];
   166  for (k = 0; k < npages; k++)
   181  for (k = 0; k < npages; k++)
   184  if (k >= npages)
   210  for (k = 0; k < npages; k++)
   228  for (k = 0; k < npages; k++)
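Note: usage.c keeps its page table in a growable array: once npages reaches
npagesmax the array is enlarged before the slot at pages[npages++] is handed
out. A minimal sketch of the grow-on-demand idiom (the growth factor and
initial size are arbitrary choices here, not libusbhid's):

    #include <stdio.h>
    #include <stdlib.h>

    struct page { int id; };

    static struct page *pages;
    static int npages, npagesmax;

    static struct page *
    new_page(void)
    {
    	if (npages >= npagesmax) {	/* grow before handing out a slot */
    		int nmax = npagesmax ? npagesmax * 2 : 8;
    		struct page *np = realloc(pages, nmax * sizeof(*np));

    		if (np == NULL)
    			return NULL;
    		pages = np;
    		npagesmax = nmax;
    	}
    	return &pages[npages++];
    }

    int
    main(void)
    {
    	for (int i = 0; i < 20; i++) {
    		struct page *p = new_page();

    		if (p == NULL)
    			return 1;
    		p->id = i;
    	}
    	printf("%d pages in a table of %d slots\n", npages, npagesmax);
    	free(pages);
    	return 0;	/* 20 pages, 32 slots */
    }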
/freebsd-current/usr.sbin/lpr/filters/ |
lpf.c
    57  static int npages = 1;                               (variable)
   191  npages++;
   199  npages++;
   203  printf("%7.2f\t%s:%s\n", (float)npages, host, name);
/freebsd-current/sys/dev/virtio/balloon/ |
virtio_balloon.c
   332  vtballoon_inflate(struct vtballoon_softc *sc, int npages)   (argument)
   340  if (npages > VTBALLOON_PAGES_PER_REQUEST)
   341          npages = VTBALLOON_PAGES_PER_REQUEST;
   343  for (i = 0; i < npages; i++) {
   362  vtballoon_deflate(struct vtballoon_softc *sc, int npages)   (argument)
   372  if (npages > VTBALLOON_PAGES_PER_REQUEST)
   373          npages = VTBALLOON_PAGES_PER_REQUEST;
   375  for (i = 0; i < npages; i++) {
   406          int npages)
   416  npages * sizeo
   405  vtballoon_send_page_frames(struct vtballoon_softc *sc, struct virtqueue *vq, int npages)   (argument)
   490  uint32_t npages;                                     (local)
   [all...]
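Note: both inflate and deflate clamp a single pass to
VTBALLOON_PAGES_PER_REQUEST so one virtqueue request never carries more page
frames than the ring can describe; larger balloon targets are met over
repeated passes. The clamp-and-batch shape, sketched standalone (the
constant's value below is assumed, not taken from the driver):

    #include <stdio.h>

    #define PAGES_PER_REQUEST 256	/* assumed per-request ceiling */

    static void
    inflate(int npages)
    {
    	if (npages > PAGES_PER_REQUEST)	/* clamp one pass */
    		npages = PAGES_PER_REQUEST;
    	for (int i = 0; i < npages; i++)
    		;	/* allocate page i and queue its frame number */
    	printf("queued %d page frames\n", npages);
    }

    int
    main(void)
    {
    	int target = 1000;

    	while (target > 0) {	/* the caller keeps going until the target is met */
    		int batch = target > PAGES_PER_REQUEST ?
    		    PAGES_PER_REQUEST : target;

    		inflate(batch);
    		target -= batch;
    	}
    	return 0;	/* four batches: 256 + 256 + 256 + 232 */
    }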
/freebsd-current/sys/dev/mthca/ |
mthca_allocator.c
   199  int npages, shift;                                   (local)
   206  npages = 1;
   220  npages *= 2;
   223  dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
   227  for (i = 0; i < npages; ++i)
   231  npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
   234  dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
   238  buf->page_list = kmalloc(npages * sizeof *buf->page_list,
   243  for (i = 0; i < npages; ++i)
   246  for (i = 0; i < npages; [all...]
mthca_memfree.c
    68  pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
    71  for (i = 0; i < chunk->npages; ++i)
    80  for (i = 0; i < chunk->npages; ++i) {
   136  struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,   (argument)
   156  while (npages > 0) {
   164  chunk->npages = 0;
   169  while (1 << cur_order > npages)
   174          &chunk->mem[chunk->npages],
   177  ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
   181  ++chunk->npages;
   524  int npages;                                          (local)
   [all...]
/freebsd-current/sys/arm/nvidia/drm2/ |
tegra_bo.c
    59  pmap_qremove(bo->vbase, bo->npages);
    62  for (i = 0; i < bo->npages; i++) {
    93  tegra_bo_alloc_contig(size_t npages, u_long alignment, vm_memattr_t memattr,   (argument)
   105  m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages,
   109  err = vm_page_reclaim_contig(0, npages, low, high,
   121  for (i = 0; i < npages; i++, m++) {
   144  for (i = 0; i < bo->npages; i++) {
   164  pmap_qenter(bo->vbase, bo->m, bo->npages);
   177  bo->npages = atop(size);
   178  bo->m = malloc(sizeof(vm_page_t *) * bo->npages, DRM_MEM_DRIVE [all...]
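Note: tegra_bo_alloc_contig() loops: when vm_page_alloc_noobj_contig() finds
no contiguous run, it calls vm_page_reclaim_contig() to free one up and
retries, giving up only when reclaim fails too. A schematic of that retry
loop; try_alloc()/try_reclaim() are hypothetical stand-ins for the two kernel
calls, rigged so the first attempt fails once:

    #include <stdbool.h>
    #include <stdio.h>

    static int attempts;

    static bool
    try_alloc(size_t npages)	/* stand-in for vm_page_alloc_noobj_contig() */
    {
    	(void)npages;
    	return attempts++ > 0;	/* fail first, succeed after one reclaim */
    }

    static bool
    try_reclaim(size_t npages)	/* stand-in for vm_page_reclaim_contig() */
    {
    	(void)npages;
    	return true;
    }

    int
    main(void)
    {
    	size_t npages = 64;

    	for (;;) {
    		if (try_alloc(npages)) {
    			printf("got %zu contiguous pages\n", npages);
    			return 0;
    		}
    		/* No run found: evict a candidate range, then retry;
    		 * give up when nothing can be reclaimed either. */
    		if (!try_reclaim(npages)) {
    			fprintf(stderr, "out of contiguous memory\n");
    			return 1;
    		}
    	}
    }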
/freebsd-current/sys/kern/ |
kern_sendfile.c
    92  int npages;                                          (member of struct sf_io)
   327  for (i = 1; i < sfio->npages; i++) {
   384  mb_free_notready(sfio->m, sfio->npages);
   396  ktls_enqueue(sfio->m, so, sfio->npages);
   400  (void)so->so_proto->pr_ready(so, sfio->m, sfio->npages);
   418  int a, count, count1, grabbed, i, j, npages, rv;     (local)
   421  npages = sfio->npages;
   431  VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
   432  if (grabbed < npages) {
   760  int nios, space, npages, rhpages;                    (local)
   [all...]
kern_physio.c
    48  int error, i, npages, maxpages;                      (local)
    52  npages = 0;
   158  if ((npages = vm_fault_quick_hold_pages(
   168          pages, npages);
   172  bp->bio_ma_n = npages;
   188  pmap_qremove((vm_offset_t)sa, npages);
   189  vm_page_unhold_pages(pages, npages);
/freebsd-current/sys/dev/mlx5/mlx5_ib/ |
mlx5_ib_mr.c
   159  int npages = 1 << ent->order;                        (local)
   190  MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
   534  int npages;                                          (local)
   537  npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
   538  return (npages + 1) / 2;
   547  int npages, int page_shift, int *size,
   558  *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
   566  memset(pas + npages, 0, *size - npages * sizeof(u64));
   596  umrwr->npages
   546  dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, int npages, int page_shift, int *size, __be64 **mr_pas, dma_addr_t *dma)   (argument)
   624  mr_umem_get(struct ib_pd *pd, u64 start, u64 length, int access_flags, int *npages, int *page_shift, int *ncont, int *order)   (argument)
   665  reg_umr(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, u64 len, int npages, int page_shift, int order, int access_flags)   (argument)
   744  mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, int zap)   (argument)
   870  reg_create(struct ib_mr *ibmr, struct ib_pd *pd, u64 virt_addr, u64 length, struct ib_umem *umem, int npages, int page_shift, int access_flags)   (argument)
   946  set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, int npages, u64 length, int access_flags)   (argument)
   965  int npages;                                          (local)
  1057  rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, u64 length, int npages, int page_shift, int order, int access_flags, int flags)   (argument)
  1138  int npages = 0;                                      (local)
  1311  int npages = mr->npages;                             (local)
  [all...]
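Note: line 537 computes how many pages a buffer spans: len + offset is rounded
up to a page multiple, then shifted down by log2 of the page size, since a
buffer starting offset bytes into its first page can straddle one extra page.
Spelled out standalone (ALIGN mirrors the kernel's power-of-two alignment
macro; ilog2ul is a local helper):

    #include <stdio.h>

    #define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    static unsigned long
    ilog2ul(unsigned long x)	/* x must be a power of two */
    {
    	unsigned long r = 0;

    	while (x >>= 1)
    		r++;
    	return r;
    }

    int
    main(void)
    {
    	unsigned long page_size = 4096;
    	unsigned long offset = 100;	/* start 100 bytes into a page */
    	unsigned long len = 8000;
    	unsigned long npages;

    	npages = ALIGN(len + offset, page_size) >> ilog2ul(page_size);
    	printf("%lu bytes at offset %lu span %lu pages\n",
    	    len, offset, npages);	/* 8100 bytes -> 2 pages */
    	return 0;
    }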