Searched refs:seg (Results 1 - 25 of 268) sorted by relevance

/linux-master/arch/s390/mm/
extmem.c
165 query_segment_type (struct dcss_segment *seg) argument
183 memcpy (qin->qname, seg->dcss_name, 8);
203 seg->vm_segtype = qout->range[0].start & 0xff;
222 seg->vm_segtype = SEG_TYPE_EWEN;
225 /* analyze diag output and update seg */
226 seg->start_addr = qout->segstart;
227 seg->end = qout->segend;
229 memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
230 seg->segcnt = qout->segcnt;
254 struct dcss_segment seg; local
271 segment_overlaps_others(struct dcss_segment *seg) argument
298 struct dcss_segment *seg; local
416 struct dcss_segment *seg; local
457 struct dcss_segment *seg; local
539 struct dcss_segment *seg; local
568 struct dcss_segment *seg; local
[all...]
/linux-master/ipc/
msgutil.c
63 struct msg_msgseg *seg; local
68 seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
69 if (seg == NULL)
71 *pseg = seg;
72 seg->next = NULL;
73 pseg = &seg->next;
87 struct msg_msgseg *seg; local
99 for (seg = msg->next; seg !
153 struct msg_msgseg *seg; local
171 struct msg_msgseg *seg; local
[all...]
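
The msgutil.c hits above build a chain of message segments with a pointer-to-pointer tail (*pseg = seg; pseg = &seg->next;). Below is a minimal, self-contained sketch of that list-building pattern in plain C; the names (seg_node, alloc_seg_chain) are hypothetical, not the kernel's:

/* Illustrative sketch only; names are hypothetical, not the kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct seg_node {
	struct seg_node *next;
	size_t len;
	unsigned char data[];             /* payload follows the header */
};

static void free_seg_chain(struct seg_node *seg)
{
	while (seg) {
		struct seg_node *next = seg->next;

		free(seg);
		seg = next;
	}
}

/* Build a chain of segments covering total_len bytes, at most seg_max each. */
static struct seg_node *alloc_seg_chain(size_t total_len, size_t seg_max)
{
	struct seg_node *head = NULL;
	struct seg_node **pseg = &head;   /* always points at the last ->next slot */

	while (total_len) {
		size_t alen = total_len < seg_max ? total_len : seg_max;
		struct seg_node *seg = malloc(sizeof(*seg) + alen);

		if (!seg) {
			free_seg_chain(head);
			return NULL;
		}
		seg->next = NULL;
		seg->len = alen;
		*pseg = seg;              /* link into the chain ... */
		pseg = &seg->next;        /* ... and advance the tail */
		total_len -= alen;
	}
	return head;
}

int main(void)
{
	struct seg_node *chain = alloc_seg_chain(10000, 4096);

	for (struct seg_node *s = chain; s; s = s->next)
		printf("segment of %zu bytes\n", s->len);
	free_seg_chain(chain);
	return 0;
}

Keeping pseg aimed at the last next pointer avoids a separate "first element" special case when appending.
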
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_buddy.c
77 unsigned int seg, order_iter, m; local
85 seg = find_first_bit(buddy->bitmap[order_iter], m);
87 if (WARN(seg >= m,
98 *segment = seg;
123 unsigned int seg, order_iter; local
126 err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter);
130 bitmap_clear(buddy->bitmap[order_iter], seg, 1);
139 seg <<= 1;
140 bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
144 seg <<
150 mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy, unsigned int seg, unsigned int order) argument
[all...]
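
dr_buddy.c above keeps one free bitmap per order: allocation finds the first free segment at the lowest order that can satisfy the request, clears its bit, and splits downward, marking the buddy (seg ^ 1) free at each lower order. Below is a toy userspace sketch of that split-down step; the bitmap here is a plain array, a stand-in rather than the kernel's bitmap API:

/* Illustrative sketch of buddy split-down; not the mlx5 code. */
#include <stdio.h>

#define MAX_ORDER 4                     /* toy pool: 2^4 minimum-size blocks */

/* bitmap[o][i] is 1 when block i of order o is free */
static unsigned char bitmap[MAX_ORDER + 1][1 << MAX_ORDER];

static int find_free(int order)
{
	for (int i = 0; i < (1 << (MAX_ORDER - order)); i++)
		if (bitmap[order][i])
			return i;
	return -1;
}

/* Allocate one block of @order; returns its segment index or -1. */
static int buddy_alloc(int order)
{
	int order_iter = order, seg;

	while (order_iter <= MAX_ORDER && (seg = find_free(order_iter)) < 0)
		order_iter++;
	if (order_iter > MAX_ORDER)
		return -1;

	bitmap[order_iter][seg] = 0;              /* take the free block */

	while (order_iter > order) {              /* split down to the wanted order */
		order_iter--;
		seg <<= 1;                        /* index of the left child */
		bitmap[order_iter][seg ^ 1] = 1;  /* its buddy becomes free */
	}
	return seg;
}

int main(void)
{
	bitmap[MAX_ORDER][0] = 1;                 /* whole pool starts free */
	printf("order-1 block at seg %d\n", buddy_alloc(1));
	printf("order-1 block at seg %d\n", buddy_alloc(1));
	return 0;
}

Doubling seg at each step maps a block index at order n to its left child at order n-1, so seg ^ 1 is always the sibling that stays free.
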
/linux-master/drivers/firmware/google/
memconsole-coreboot.c
45 struct seg { /* describes ring buffer segments in logical order */ struct
48 } seg[2] = { {0}, {0} }; local
55 seg[0] = (struct seg){.phys = cursor, .len = size - cursor};
56 seg[1] = (struct seg){.phys = 0, .len = cursor};
58 seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};
61 for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {
63 cbmem_console->body + seg[
[all...]
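
memconsole-coreboot.c above reads a wrapped ring buffer by describing it as two segments in logical order: once the cursor has wrapped, seg[0] is the tail from the cursor to the end of the buffer and seg[1] is the head from offset 0 up to the cursor. A simplified sketch of that two-segment read follows (hypothetical names, plain memcpy instead of memcpy_fromio):

/* Illustrative sketch of reading a wrapped ring buffer via two segments. */
#include <stdio.h>
#include <string.h>

struct seg { size_t off; size_t len; };   /* one contiguous piece, logical order */

/* Copy the logical contents of a ring buffer into out[]. */
static size_t ring_read(const char *ring, size_t size, size_t cursor,
			int wrapped, char *out)
{
	struct seg seg[2];
	size_t done = 0;

	if (wrapped) {
		seg[0] = (struct seg){ .off = cursor, .len = size - cursor };
		seg[1] = (struct seg){ .off = 0,      .len = cursor };
	} else {
		seg[0] = (struct seg){ .off = 0, .len = cursor };
		seg[1] = (struct seg){ .off = 0, .len = 0 };
	}

	for (int i = 0; i < 2; i++) {
		memcpy(out + done, ring + seg[i].off, seg[i].len);
		done += seg[i].len;
	}
	return done;
}

int main(void)
{
	/* Physical layout "EFABCD" with cursor 2 and wrapped: logical "ABCDEF". */
	char ring[6] = { 'E', 'F', 'A', 'B', 'C', 'D' };
	char out[7] = { 0 };

	ring_read(ring, sizeof(ring), 2, 1, out);
	printf("%s\n", out);
	return 0;
}
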
/linux-master/drivers/infiniband/hw/mlx5/
wr.c
55 void **seg, int *size, void **cur_edge)
57 struct mlx5_wqe_eth_seg *eseg = *seg;
85 *seg += stride;
88 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
91 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size,
98 *seg += sizeof(struct mlx5_wqe_eth_seg);
189 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, argument
195 memset(seg, 0, sizeof(*seg));
198 seg
54 set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size, void **cur_edge) argument
211 set_linv_mkey_seg(struct mlx5_mkey_seg *seg) argument
263 struct mlx5_wqe_inline_seg *seg; local
416 set_sig_data_segment(const struct ib_send_wr *send_wr, struct ib_mr *sig_mr, struct ib_sig_attrs *sig_attrs, struct mlx5_ib_qp *qp, void **seg, int *size, void **cur_edge) argument
528 set_sig_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_mr *sig_mr, int access_flags, u32 size, u32 length, u32 pdn) argument
557 set_pi_umr_wr(const struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, void **seg, int *size, void **cur_edge) argument
608 set_psv_wr(struct ib_sig_domain *domain, u32 psv_idx, void **seg, int *size) argument
635 set_reg_wr(struct mlx5_ib_qp *qp, const struct ib_reg_wr *wr, void **seg, int *size, void **cur_edge, bool check_not_free) argument
692 set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size, void **cur_edge) argument
724 mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx, int *size, void **cur_edge, int nreq, __be32 general_id, bool send_signaled, bool solicited) argument
748 begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, const struct ib_send_wr *wr, unsigned int *idx, int *size, void **cur_edge, int nreq) argument
758 mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, void *seg, u8 size, void *cur_edge, unsigned int idx, u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode) argument
787 handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size) argument
794 handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int idx) argument
803 handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int idx) argument
812 handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, struct ib_sig_domain *domain, u32 psv_index, u8 next_fence) argument
843 handle_reg_mr_integrity(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence) argument
931 handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence, int *num_sge) argument
982 handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size) argument
994 handle_qpt_hw_gsi(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, void **seg, int *size, void **cur_edge) argument
1004 handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, void **seg, int *size, void **cur_edge) argument
1065 void *seg; local
[all...]
wr.h
47 * @seg: Current WQE position (16B aligned).
51 static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg, argument
56 if (likely(*seg != *cur_edge))
62 *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
66 * WQ's pointers. At the end @seg is aligned to 16B regardless the copied size.
69 * @seg: Current WQE position (16B aligned).
75 void **seg, u32 *wqe_sz,
79 size_t leftlen = *cur_edge - *seg;
83 memcpy(*seg, src, copysz);
88 *seg
74 mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge, void **seg, u32 *wqe_sz, const void *src, size_t n) argument
[all...]
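
wr.h above copies into the send work queue in chunks bounded by the current fragment edge (leftlen = *cur_edge - *seg) and advances to the next fragment whenever the edge is reached. Below is a simplified userspace sketch of that chunked-copy-with-wrap idea, with fixed-size fragments and invented names (frag_memcpy is not an mlx5 helper):

/* Illustrative sketch of copying across fragment boundaries; hypothetical names. */
#include <stdio.h>
#include <string.h>

#define FRAG_SIZE 16
#define NFRAGS    4

static char frags[NFRAGS][FRAG_SIZE];     /* stand-in for a fragmented work queue */

/* Copy n bytes starting at fragment *frag, offset *off, wrapping at each edge. */
static void frag_memcpy(int *frag, size_t *off, const void *src, size_t n)
{
	const char *s = src;

	while (n) {
		size_t left = FRAG_SIZE - *off;          /* room before the edge */
		size_t copy = n < left ? n : left;

		memcpy(&frags[*frag][*off], s, copy);
		s += copy;
		n -= copy;
		*off += copy;
		if (*off == FRAG_SIZE) {                 /* hit the edge: move on */
			*frag = (*frag + 1) % NFRAGS;
			*off = 0;
		}
	}
}

int main(void)
{
	int frag = 0;
	size_t off = 10;

	frag_memcpy(&frag, &off, "this message crosses a fragment edge", 37);
	printf("ended in fragment %d at offset %zu\n", frag, off);
	return 0;
}
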
/linux-master/drivers/gpu/drm/amd/display/dmub/src/
dmub_dcn302.c
34 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
dmub_dcn301.c
34 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
dmub_dcn21.c
34 #define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg
dmub_dcn303.c
35 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
dmub_dcn315.c
40 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
dmub_dcn316.c
40 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
dmub_dcn314.c
40 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
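
The dmub_dcn*.c hits are all the same one-liner: a token-pasting macro that turns a segment index into a register-base identifier with ##. A tiny standalone example of that expansion follows; the MY_BASE__INST0_SEG* constants are made up:

/* Illustrative sketch of ## token pasting; these identifiers are invented. */
#include <stdio.h>

#define MY_BASE__INST0_SEG0 0x00000000
#define MY_BASE__INST0_SEG1 0x00010000
#define MY_BASE__INST0_SEG2 0x00020000

/* BASE(1) -> BASE_INNER(1) -> MY_BASE__INST0_SEG1 */
#define BASE_INNER(seg) MY_BASE__INST0_SEG##seg
#define BASE(seg) BASE_INNER(seg)

int main(void)
{
	printf("segment 1 base: 0x%08x\n", BASE(1));
	printf("segment 2 base: 0x%08x\n", BASE(2));
	return 0;
}

The extra BASE() wrapper forces the argument to be macro-expanded before pasting, which is the usual reason such macros come in two levels.
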
/linux-master/arch/x86/boot/compressed/
cmdline.c
7 static inline void set_fs(unsigned long seg) argument
9 fs = seg << 4; /* shift it back */
/linux-master/arch/m68k/sun3/
mmu_emu.c
129 unsigned long seg, num; local
152 for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) {
153 i = sun3_get_segmap(seg);
158 print_pte_vaddr (seg);
160 sun3_put_segmap(seg, SUN3_INVALID_PMEG);
165 for (num=0, seg=0x0F800000; seg<0x10000000; seg
[all...]
/linux-master/tools/testing/selftests/sgx/
load.c
106 static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg) argument
113 secinfo.flags = seg->flags;
115 ioc.src = (uint64_t)seg->src;
116 ioc.offset = seg->offset;
117 ioc.length = seg->size;
119 if (seg->measure)
180 struct encl_segment *seg; local
253 seg = &encl->segment_tbl[j];
273 seg->prot = PROT_READ | PROT_WRITE;
274 seg
356 struct encl_segment *seg = &encl->segment_tbl[i]; local
[all...]
/linux-master/arch/m68k/include/asm/
tlbflush.h
142 unsigned char seg; local
149 seg = sun3_get_segmap(i);
150 if (seg == SUN3_INVALID_PMEG)
154 pmeg_alloc[seg] = 0;
155 pmeg_ctx[seg] = 0;
156 pmeg_vaddr[seg] = 0;
189 unsigned char seg, oldctx; local
198 if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
200 if(pmeg_ctx[seg] == mm->context) {
201 pmeg_alloc[seg]
[all...]
/linux-master/arch/x86/pci/
numachip.c
19 static inline char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) argument
21 struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
28 static int pci_mmcfg_read_numachip(unsigned int seg, unsigned int bus, argument
46 addr = pci_dev_base(seg, bus, devfn);
68 static int pci_mmcfg_write_numachip(unsigned int seg, unsigned int bus, argument
82 addr = pci_dev_base(seg, bus, devfn);
mmconfig_64.c
19 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) argument
21 struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
28 static int pci_mmcfg_read(unsigned int seg, unsigned int bus, argument
40 addr = pci_dev_base(seg, bus, devfn);
62 static int pci_mmcfg_write(unsigned int seg, unsigned int bus, argument
72 addr = pci_dev_base(seg, bus, devfn);
mmconfig_32.c
27 static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) argument
29 struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
51 static int pci_mmcfg_read(unsigned int seg, unsigned int bus, argument
63 base = get_base_addr(seg, bus, devfn);
90 static int pci_mmcfg_write(unsigned int seg, unsigned int bus, argument
100 base = get_base_addr(seg, bus, devfn);
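
numachip.c, mmconfig_64.c and mmconfig_32.c above all map (seg, bus, devfn) to an address inside that segment's ECAM window before doing the config read or write. In ECAM every function owns 4 KiB of config space, so the offset is pure shift arithmetic. A short sketch of that calculation follows (standalone, no MMIO; ecam_offset is an invented helper, not a kernel function):

/* Illustrative sketch of ECAM offset math; not the kernel's mmconfig code. */
#include <stdio.h>
#include <stdint.h>

/*
 * In ECAM, each bus gets 1 MiB and each device/function 4 KiB:
 *   offset = bus << 20 | devfn << 12 | reg
 * where devfn packs device (5 bits) and function (3 bits).
 */
static uint64_t ecam_offset(unsigned int bus, unsigned int devfn,
			    unsigned int reg)
{
	return ((uint64_t)bus << 20) | ((uint64_t)devfn << 12) | reg;
}

int main(void)
{
	unsigned int dev = 3, fn = 1;
	unsigned int devfn = (dev << 3) | fn;

	printf("bus 0 dev 3 fn 1 reg 0x10 -> ECAM offset 0x%llx\n",
	       (unsigned long long)ecam_offset(0, devfn, 0x10));
	return 0;
}
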
/linux-master/fs/nfsd/
flexfilelayout.c
26 struct nfsd4_layout_seg *seg = &args->lg_seg; local
53 if (seg->iomode == IOMODE_READ) {
68 seg->offset = 0;
69 seg->length = NFS4_MAX_UINT64;
71 dprintk("GET: 0x%llx:0x%llx %d\n", seg->offset, seg->length,
72 seg->iomode);
76 seg->length = 0;
/linux-master/drivers/acpi/
pci_mcfg.c
46 #define AL_ECAM(table_id, rev, seg, ops) \
47 { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
58 #define QCOM_ECAM32(seg) \
59 { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
70 #define HISI_QUAD_DOM(table_id, seg, ops) \
71 { "HISI ", table_id, 0, (seg) + 0, MCFG_BUS_ANY, ops }, \
72 { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
73 { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
74 { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
100 #define THUNDER_ECAM_QUIRK(rev, seg) \
238 u16 seg = root->segment; local
[all...]
/linux-master/kernel/rcu/
rcu_segcblist.h
18 long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg);
112 static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) argument
114 return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
121 static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg) argument
123 if (seg == RCU_DONE_TAIL)
125 return rsclp->tails[seg - 1] == rsclp->tails[seg];
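
rcu_segcblist.h above decides whether a segment of the callback list is empty by comparing adjacent tail pointers; only the done segment is checked against the list head. Below is a simplified sketch of that tails-array representation (seglist, seg_empty and friends are hypothetical names, not the RCU API):

/* Illustrative sketch of a segmented list tracked by tail pointers. */
#include <stdio.h>
#include <stddef.h>

#define NSEGS 3

struct cb {
	struct cb *next;
	int id;
};

struct seglist {
	struct cb *head;
	struct cb **tails[NSEGS];   /* tails[i]: the ->next slot where segment i ends */
};

static void seglist_init(struct seglist *sl)
{
	sl->head = NULL;
	for (int i = 0; i < NSEGS; i++)
		sl->tails[i] = &sl->head;
}

/* A segment is empty when its tail equals the previous segment's tail. */
static int seg_empty(struct seglist *sl, int seg)
{
	if (seg == 0)
		return sl->tails[0] == &sl->head;
	return sl->tails[seg] == sl->tails[seg - 1];
}

/* Enqueue a callback into the last segment. */
static void seglist_enqueue(struct seglist *sl, struct cb *cb)
{
	cb->next = NULL;
	*sl->tails[NSEGS - 1] = cb;
	sl->tails[NSEGS - 1] = &cb->next;
}

int main(void)
{
	struct seglist sl;
	struct cb a = { .id = 1 };

	seglist_init(&sl);
	printf("last seg empty before enqueue: %d\n", seg_empty(&sl, NSEGS - 1));
	seglist_enqueue(&sl, &a);
	printf("last seg empty after enqueue:  %d\n", seg_empty(&sl, NSEGS - 1));
	printf("middle seg still empty:        %d\n", seg_empty(&sl, 1));
	return 0;
}
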
/linux-master/arch/x86/kvm/
mtrr.c
189 static u64 fixed_mtrr_seg_unit_size(int seg) argument
191 return 8 << fixed_seg_table[seg].range_shift;
194 static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) argument
198 *seg = 0;
202 *seg = 1;
208 *seg = 2;
220 static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end) argument
222 struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
223 u64 unit_size = fixed_mtrr_seg_unit_size(seg);
230 static int fixed_mtrr_seg_unit_range_index(int seg, in argument
241 fixed_mtrr_seg_end_range_index(int seg) argument
252 int seg, unit; local
263 int seg, unit; local
274 int seg, seg_num = ARRAY_SIZE(fixed_seg_table); local
285 fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg) argument
296 fixed_mtrr_range_end_addr(int seg, int index) argument
457 int seg; member in struct:mtrr_iter::__anon103::__anon104
473 int seg, index; local
[all...]
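
mtrr.c above models the fixed-range MTRRs as three segments (64 KiB ranges below 0x80000, 16 KiB ranges up to 0xC0000, 4 KiB ranges up to 0x100000), and each MSR, called a unit, holds eight one-byte range fields, so a unit spans 8 << range_shift bytes. A small sketch of that address arithmetic follows, assuming the standard x86 fixed-MTRR layout; the names mirror the snippet but the code is not KVM's:

/* Illustrative sketch of the x86 fixed-MTRR segment layout; not KVM code. */
#include <stdio.h>
#include <stdint.h>

struct fixed_seg {
	uint64_t start;      /* first physical address covered by the segment */
	uint64_t end;        /* one past the last covered address */
	int range_shift;     /* log2 of one range's size */
};

static const struct fixed_seg fixed_seg_table[] = {
	{ 0x00000, 0x80000,  16 },   /* one 64 KiB-range MSR   */
	{ 0x80000, 0xC0000,  14 },   /* two 16 KiB-range MSRs  */
	{ 0xC0000, 0x100000, 12 },   /* eight 4 KiB-range MSRs */
};

/* Each MSR (unit) holds eight one-byte range fields. */
static uint64_t seg_unit_size(int seg)
{
	return 8ull << fixed_seg_table[seg].range_shift;
}

static void seg_unit_range(int seg, int unit, uint64_t *start, uint64_t *end)
{
	uint64_t unit_size = seg_unit_size(seg);

	*start = fixed_seg_table[seg].start + unit * unit_size;
	*end = *start + unit_size;
}

int main(void)
{
	for (int seg = 0; seg < 3; seg++) {
		uint64_t s, e;

		seg_unit_range(seg, 0, &s, &e);
		printf("seg %d: unit size 0x%llx, unit 0 covers 0x%llx-0x%llx\n",
		       seg, (unsigned long long)seg_unit_size(seg),
		       (unsigned long long)s, (unsigned long long)e);
	}
	return 0;
}
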
/linux-master/net/ipv6/
tcpv6_offload.c
96 static void __tcpv6_gso_segment_csum(struct sk_buff *seg, argument
104 th = tcp_hdr(seg);
105 inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
113 struct sk_buff *seg; local
117 seg = segs;
118 th = tcp_hdr(seg);
119 iph = ipv6_hdr(seg);
120 th2 = tcp_hdr(seg->next);
121 iph2 = ipv6_hdr(seg->next);
128 while ((seg
[all...]

Completed in 239 milliseconds
