Lines Matching defs:pg (kernel/trace/ftrace.c)

3  * Infrastructure for profiling code inserted by 'gcc -pg'.
457 struct ftrace_profile_page *pg;
459 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
465 if ((void *)rec >= (void *)&pg->records[pg->index]) {
466 pg = pg->next;
467 if (!pg)
469 rec = &pg->records[0];
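
The profiling lookup above leans on two invariants: each block of profile records is page-sized and page-aligned, so the page header is recoverable from any record pointer by masking with PAGE_MASK (line 459), and stepping past the last used record crosses to pg->next (lines 465-469). Below is a minimal user-space sketch of the same pattern, assuming a simplified struct layout and using posix_memalign() in place of the kernel's page allocator; the names page_of() and next_rec() are mine, not ftrace's.

    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    struct record { unsigned long ip; unsigned long counter; };

    struct profile_page {           /* simplified ftrace_profile_page */
        struct profile_page *next;
        unsigned long index;        /* records in use on this page */
        struct record records[];
    };

    /* Recover the page header from a pointer to any record inside it. */
    static struct profile_page *page_of(struct record *rec)
    {
        return (struct profile_page *)((unsigned long)rec & PAGE_MASK);
    }

    /* Step to the following record, crossing pages when one is exhausted. */
    static struct record *next_rec(struct record *rec)
    {
        struct profile_page *pg = page_of(rec);

        rec++;
        if ((void *)rec >= (void *)&pg->records[pg->index]) {
            pg = pg->next;
            if (!pg)
                return NULL;
            rec = &pg->records[0];
        }
        return rec;
    }

    int main(void)
    {
        struct profile_page *pg;

        if (posix_memalign((void **)&pg, PAGE_SIZE, PAGE_SIZE))
            return 1;
        pg->next = NULL;
        pg->index = 2;
        pg->records[0].ip = 0x10;
        pg->records[1].ip = 0x20;

        for (struct record *r = &pg->records[0]; r; r = next_rec(r))
            printf("ip = %#lx (page %p)\n", r->ip, (void *)page_of(r));
        free(pg);
        return 0;
    }
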
598 struct ftrace_profile_page *pg;
600 pg = stat->pages = stat->start;
602 while (pg) {
603 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
604 pg->index = 0;
605 pg = pg->next;
614 struct ftrace_profile_page *pg;
640 pg = stat->start = stat->pages;
645 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
646 if (!pg->next)
648 pg = pg->next;
654 pg = stat->start;
655 while (pg) {
656 unsigned long tmp = (unsigned long)pg;
658 pg = pg->next;
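
Lines 598-605 reset the profile buffers (one memset plus index = 0 per page) and lines 640-658 build and free the page chain. The one subtlety in the teardown is saving pg->next before the page is released, which is what the tmp variable at line 656 exists for. A hedged user-space sketch of the same build/free discipline, with calloc() standing in for get_zeroed_page() and invented function names:

    #include <stdio.h>
    #include <stdlib.h>

    struct profile_page {
        struct profile_page *next;
        unsigned long index;
    };

    /* Build a zeroed chain of n pages; returns the head or NULL. */
    static struct profile_page *alloc_chain(int n)
    {
        struct profile_page *start, *pg;

        start = pg = calloc(1, sizeof(*pg));
        if (!pg)
            return NULL;
        for (int i = 1; i < n && pg; i++) {
            pg->next = calloc(1, sizeof(*pg));
            pg = pg->next;          /* NULL ends the loop early */
        }
        return start;
    }

    /* Free the chain; grab next before freeing, as at lines 654-658. */
    static void free_chain(struct profile_page *pg)
    {
        while (pg) {
            struct profile_page *tmp = pg;

            pg = pg->next;
            free(tmp);
        }
    }

    int main(void)
    {
        struct profile_page *chain = alloc_chain(4);

        for (struct profile_page *pg = chain; pg; pg = pg->next)
            printf("page %p\n", (void *)pg);
        free_chain(chain);
        return 0;
    }
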
1537 #define do_for_each_ftrace_rec(pg, rec) \
1538 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1540 for (_____i = 0; _____i < pg->index; _____i++) { \
1541 rec = &pg->records[_____i];
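
do_for_each_ftrace_rec() (line 1537) is the workhorse traversal used throughout the rest of this listing: an outer loop over the page list and an inner loop over each page's used records, deliberately left with two unclosed braces so the caller's block sits inside both; ftrace.c pairs it with a while_for_each_ftrace_rec() macro that supplies the closing braces. A self-contained imitation, with types cut down for the sketch:

    #include <stdio.h>

    struct dyn_ftrace { unsigned long ip; };

    struct ftrace_page {
        struct ftrace_page *next;
        int index;                    /* records in use */
        struct dyn_ftrace records[4]; /* fixed size for the sketch */
    };

    /* Opens two scopes; close them with while_for_each_rec(). */
    #define do_for_each_rec(pg, rec, start)              \
        for (pg = (start); pg; pg = pg->next) {          \
            for (int _i = 0; _i < pg->index; _i++) {     \
                rec = &pg->records[_i];

    #define while_for_each_rec() } }

    int main(void)
    {
        struct ftrace_page p2 = { NULL, 1, { { 0x30 } } };
        struct ftrace_page p1 = { &p2, 2, { { 0x10 }, { 0x20 } } };
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;

        do_for_each_rec(pg, rec, &p1) {
            printf("ip = %#lx\n", rec->ip);
        } while_for_each_rec();
        return 0;
    }
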
1562 struct ftrace_page *pg;
1569 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1570 if (pg->index == 0 ||
1571 end < pg->records[0].ip ||
1572 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1574 rec = bsearch(&key, pg->records, pg->index,
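
Lines 1569-1574 are the address lookup: pages whose first and last record ips cannot cover the range are skipped cheaply (lines 1570-1572), and only then is the page's sorted records array binary searched. The comparator below is my reconstruction of the overlap test, not the kernel's ftrace_cmp_recs verbatim: it returns 0 when the key range [start, end] intersects [ip, ip + MCOUNT_INSN_SIZE), so one comparator serves exact and ranged lookups alike.

    #include <stdio.h>
    #include <stdlib.h>

    #define MCOUNT_INSN_SIZE 4   /* placeholder; arch-dependent in the kernel */

    struct dyn_ftrace { unsigned long ip; };

    struct lookup_key { unsigned long start, end; };   /* end inclusive */

    /* 0 when [start, end] overlaps [ip, ip + MCOUNT_INSN_SIZE). */
    static int cmp_recs(const void *a, const void *b)
    {
        const struct lookup_key *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->end < rec->ip)
            return -1;                          /* range before this rec */
        if (key->start >= rec->ip + MCOUNT_INSN_SIZE)
            return 1;                           /* range after this rec */
        return 0;
    }

    int main(void)
    {
        struct dyn_ftrace recs[] = { { 0x100 }, { 0x200 }, { 0x300 } };
        struct lookup_key key = { 0x200, 0x200 };   /* exact lookup of one ip */
        struct dyn_ftrace *rec;

        rec = bsearch(&key, recs, 3, sizeof(recs[0]), cmp_recs);
        printf("found ip %#lx\n", rec ? rec->ip : 0UL);
        return 0;
    }
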
1703 struct ftrace_page *pg;
1741 do_for_each_ftrace_rec(pg, rec) {
1946 struct ftrace_page *pg;
1974 do_for_each_ftrace_rec(pg, rec) {
2020 do_for_each_ftrace_rec(pg, rec) {
2690 struct ftrace_page *pg;
2698 do_for_each_ftrace_rec(pg, rec) {
2715 struct ftrace_page *pg;
2737 iter->pg = ftrace_pages_start;
2741 while (iter->pg && !iter->pg->index)
2742 iter->pg = iter->pg->next;
2744 if (!iter->pg)
2760 if (iter->index >= iter->pg->index) {
2761 iter->pg = iter->pg->next;
2765 while (iter->pg && !iter->pg->index)
2766 iter->pg = iter->pg->next;
2769 if (!iter->pg)
2783 return &iter->pg->records[iter->index];
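
Lines 2737-2783 are the read-side cursor behind the seq_file interface: iter->pg plus iter->index name the next record, every cursor movement skips pages with index == 0, and a NULL iter->pg means end of stream. The same state machine in isolation (struct and function names are placeholders):

    #include <stdio.h>

    struct dyn_ftrace { unsigned long ip; };

    struct ftrace_page {
        struct ftrace_page *next;
        int index;
        struct dyn_ftrace records[4];
    };

    struct iter {
        struct ftrace_page *pg;
        int index;
    };

    static struct dyn_ftrace *iter_next(struct iter *it)
    {
        if (!it->pg)
            return NULL;
        if (it->index >= it->pg->index) {        /* page exhausted */
            it->pg = it->pg->next;
            it->index = 0;
            while (it->pg && !it->pg->index)     /* skip empty pages */
                it->pg = it->pg->next;
            if (!it->pg)
                return NULL;
        }
        return &it->pg->records[it->index++];
    }

    int main(void)
    {
        struct ftrace_page p3 = { NULL, 1, { { 0x30 } } };
        struct ftrace_page p2 = { &p3, 0 };      /* empty page, skipped */
        struct ftrace_page p1 = { &p2, 2, { { 0x10 }, { 0x20 } } };
        struct iter it = { &p1, 0 };
        struct dyn_ftrace *rec;

        while ((rec = iter_next(&it)))
            printf("ip = %#lx\n", rec->ip);
        return 0;
    }
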
3120 struct ftrace_page *pg;
3123 do_for_each_ftrace_rec(pg, rec) {
3185 struct ftrace_page *pg;
3208 for (pg = new_pgs; pg; pg = pg->next) {
3210 for (i = 0; i < pg->index; i++) {
3216 p = &pg->records[i];
3237 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3251 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3253 if (!pg->records) {
3265 pg->order = order;
3275 struct ftrace_page *pg = pages;
3277 while (pg) {
3278 if (pg->records) {
3279 free_pages((unsigned long)pg->records, pg->order);
3280 ftrace_number_of_pages -= 1 << pg->order;
3282 pages = pg->next;
3283 kfree(pg);
3284 pg = pages;
3293 struct ftrace_page *pg;
3299 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3300 if (!pg)
3309 cnt = ftrace_allocate_records(pg, num_to_init);
3317 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3318 if (!pg->next)
3321 pg = pg->next;
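
ftrace_allocate_records() (line 3237) sizes each block as a power-of-two number of pages (an "order"), remembers that order in pg->order so the matching free_pages() at line 3279 releases exactly what was allocated, and reports how many records fit. The sketch below keeps only that bookkeeping; the kernel's actual order choice and its fall-back to smaller orders on allocation failure are more involved. calloc() stands in for __get_free_pages(GFP_KERNEL | __GFP_ZERO, order), and allocate_records() is an invented name.

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    struct dyn_ftrace { unsigned long ip; unsigned long flags; };

    struct ftrace_page {
        struct ftrace_page *next;
        struct dyn_ftrace *records;
        int index;
        int order;
    };

    /* Allocate room for count records; returns how many actually fit. */
    static int allocate_records(struct ftrace_page *pg, int count)
    {
        unsigned long bytes = (unsigned long)count * sizeof(struct dyn_ftrace);
        int order = 0;

        while ((PAGE_SIZE << order) < bytes)   /* smallest sufficient order */
            order++;

        pg->records = calloc(1, PAGE_SIZE << order);
        if (!pg->records)
            return -1;
        pg->order = order;                     /* remembered for freeing */
        return (PAGE_SIZE << order) / sizeof(struct dyn_ftrace);
    }

    int main(void)
    {
        struct ftrace_page pg = { 0 };
        int cnt = allocate_records(&pg, 1000);

        printf("order %d holds %d records\n", pg.order, cnt);
        free(pg.records);
        return 0;
    }
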
3338 struct ftrace_page *pg;
3568 if (iter->idx >= iter->pg->index) {
3569 if (iter->pg->next) {
3570 iter->pg = iter->pg->next;
3575 rec = &iter->pg->records[iter->idx++];
3677 iter->pg = ftrace_pages_start;
3747 struct ftrace_page *pg;
3751 do_for_each_ftrace_rec(pg, rec) {
3917 iter->pg = ftrace_pages_start;
3941 iter->pg = ftrace_pages_start;
3966 iter->pg = ftrace_pages_start;
3990 iter->pg = ftrace_pages_start;
4076 iter->pg = ftrace_pages_start;
4204 struct ftrace_page *pg;
4211 do_for_each_ftrace_rec(pg, rec) {
4212 if (pg->index <= index) {
4213 index -= pg->index;
4217 rec = &pg->records[index];
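
Lines 4211-4217 locate record number N across the whole list by subtracting each page's record count until the remainder indexes into the current page. The same walk as a stand-alone function (lookup_nth() is an invented name):

    #include <stdio.h>

    struct dyn_ftrace { unsigned long ip; };

    struct ftrace_page {
        struct ftrace_page *next;
        int index;
        struct dyn_ftrace *records;
    };

    /* Return record number n counted across the whole page list. */
    static struct dyn_ftrace *lookup_nth(struct ftrace_page *pg, int n)
    {
        for (; pg; pg = pg->next) {
            if (pg->index <= n) {
                n -= pg->index;       /* skip this whole page */
                continue;
            }
            return &pg->records[n];
        }
        return NULL;                  /* n past the last record */
    }

    int main(void)
    {
        struct dyn_ftrace r1[] = { { 0x10 }, { 0x20 } };
        struct dyn_ftrace r2[] = { { 0x30 } };
        struct ftrace_page p2 = { NULL, 1, r2 };
        struct ftrace_page p1 = { &p2, 2, r1 };
        struct dyn_ftrace *rec = lookup_nth(&p1, 2);

        printf("record 2: %#lx\n", rec ? rec->ip : 0UL);
        return 0;
    }
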
4288 struct ftrace_page *pg;
4320 do_for_each_ftrace_rec(pg, rec) {
6282 struct ftrace_page *pg;
6300 do_for_each_ftrace_rec(pg, rec) {
6483 struct ftrace_page *pg;
6538 pg = start_pg;
6553 end_offset = (pg->index+1) * sizeof(pg->records[0]);
6554 if (end_offset > PAGE_SIZE << pg->order) {
6556 if (WARN_ON(!pg->next))
6558 pg = pg->next;
6561 rec = &pg->records[pg->index++];
6565 if (pg->next) {
6566 pg_unuse = pg->next;
6567 pg->next = NULL;
6571 ftrace_pages = pg;
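
Lines 6538-6571 append a module's records into pre-allocated pages: before each store, end_offset checks whether one more record would overrun the block (PAGE_SIZE << pg->order, line 6554), and if so the code steps to pg->next. A reduced sketch of that append path, assuming simplified types and an invented add_record() helper:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    struct dyn_ftrace { unsigned long ip; };

    struct ftrace_page {
        struct ftrace_page *next;
        struct dyn_ftrace *records;
        int index;
        int order;
    };

    /* Append one record, stepping to the next block when full. */
    static struct dyn_ftrace *add_record(struct ftrace_page **pgp,
                                         unsigned long ip)
    {
        struct ftrace_page *pg = *pgp;
        struct dyn_ftrace *rec;
        unsigned long end_offset;

        end_offset = (pg->index + 1) * sizeof(pg->records[0]);
        if (end_offset > PAGE_SIZE << pg->order) {
            if (!pg->next)            /* no pre-allocated room left */
                return NULL;
            *pgp = pg = pg->next;
        }
        rec = &pg->records[pg->index++];
        rec->ip = ip;
        return rec;
    }

    int main(void)
    {
        struct ftrace_page p2 = { NULL, calloc(1, PAGE_SIZE), 0, 0 };
        struct ftrace_page p1 = { &p2, calloc(1, PAGE_SIZE), 0, 0 };
        struct ftrace_page *pg = &p1;

        for (unsigned long ip = 0x1000; ip < 0x1000 + 1200; ip += 2)
            add_record(&pg, ip);      /* 600 records overflow page 1 */
        printf("page 1 holds %d, page 2 holds %d\n", p1.index, p2.index);
        free(p1.records);
        free(p2.records);
        return 0;
    }
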
6701 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6710 for (i = 0; i < pg->index; i++) {
6711 rec = &pg->records[i];
6724 static void clear_mod_from_hashes(struct ftrace_page *pg)
6733 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6734 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6763 struct ftrace_page *pg;
6783 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6784 rec = &pg->records[0];
6790 if (WARN_ON(pg == ftrace_pages_start))
6794 if (pg == ftrace_pages)
6797 ftrace_update_tot_cnt -= pg->index;
6798 *last_pg = pg->next;
6800 pg->next = tmp_page;
6801 tmp_page = pg;
6803 last_pg = &pg->next;
6811 for (pg = tmp_page; pg; pg = tmp_page) {
6814 clear_mod_from_hashes(pg);
6816 if (pg->records) {
6817 free_pages((unsigned long)pg->records, pg->order);
6818 ftrace_number_of_pages -= 1 << pg->order;
6820 tmp_page = pg->next;
6821 kfree(pg);
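
The module-unload path at lines 6783-6821 is a textbook pointer-to-pointer unlink: last_pg always addresses the link that reaches pg, so removing a page is a single store through it (line 6798); removed pages are parked on tmp_page and freed only after the walk, which in the kernel lets the freeing happen outside the lock. A compact stand-alone version (remove_in_range() is my name, and the in-range test is simplified to the page's first record):

    #include <stdio.h>

    struct dyn_ftrace { unsigned long ip; };

    struct ftrace_page {
        struct ftrace_page *next;
        struct dyn_ftrace records[1];
        int index;
    };

    /* Unlink pages whose records fall in [lo, hi); return them chained. */
    static struct ftrace_page *remove_in_range(struct ftrace_page **list,
                                               unsigned long lo,
                                               unsigned long hi)
    {
        struct ftrace_page **last_pg = list;
        struct ftrace_page *pg, *tmp_page = NULL;

        for (pg = *list; pg; pg = *last_pg) {
            unsigned long ip = pg->records[0].ip;

            if (ip >= lo && ip < hi) {
                *last_pg = pg->next;  /* unlink from the live list */
                pg->next = tmp_page;  /* park for freeing later */
                tmp_page = pg;
            } else {
                last_pg = &pg->next;
            }
        }
        return tmp_page;
    }

    int main(void)
    {
        struct ftrace_page p3 = { NULL, { { 0x300 } }, 1 };
        struct ftrace_page p2 = { &p3,  { { 0x200 } }, 1 };
        struct ftrace_page p1 = { &p2,  { { 0x100 } }, 1 };
        struct ftrace_page *head = &p1, *gone;

        gone = remove_in_range(&head, 0x200, 0x300);
        for (struct ftrace_page *pg = head; pg; pg = pg->next)
            printf("kept  %#lx\n", pg->records[0].ip);
        for (struct ftrace_page *pg = gone; pg; pg = pg->next)
            printf("freed %#lx\n", pg->records[0].ip);
        return 0;
    }
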
6829 struct ftrace_page *pg;
6852 do_for_each_ftrace_rec(pg, rec) {
6856 * module text shares the pg. If a record is
6857 * not part of this module, then skip this pg,
7145 struct ftrace_page *pg;
7165 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7166 if (end < pg->records[0].ip ||
7167 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7170 rec = bsearch(&key, pg->records, pg->index,
7182 pg->index--;
7184 if (!pg->index) {
7185 *last_pg = pg->next;
7186 pg->next = tmp_page;
7187 tmp_page = pg;
7188 pg = container_of(last_pg, struct ftrace_page, next);
7190 ftrace_pages = pg;
7194 (pg->index - (rec - pg->records)) * sizeof(*rec));
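
Finally, lines 7182-7194 delete a single record in place: pg->index is decremented and memmove() slides the tail of the array down over the victim; a page whose index reaches zero is then unlinked from the list (lines 7184-7190). The arithmetic in the sketch below matches line 7194: after the decrement, pg->index - (rec - pg->records) is exactly the number of records sitting past the deleted one.

    #include <stdio.h>
    #include <string.h>

    struct dyn_ftrace { unsigned long ip; };

    struct ftrace_page {
        struct ftrace_page *next;
        int index;
        struct dyn_ftrace records[4];
    };

    /* Remove *rec from its page by sliding the tail down over it. */
    static void delete_rec(struct ftrace_page *pg, struct dyn_ftrace *rec)
    {
        pg->index--;
        memmove(rec, rec + 1,
                (pg->index - (rec - pg->records)) * sizeof(*rec));
        /* a page whose index drops to 0 is then unlinked (line 7185) */
    }

    int main(void)
    {
        struct ftrace_page pg = { NULL, 3, { { 0x10 }, { 0x20 }, { 0x30 } } };

        delete_rec(&pg, &pg.records[1]);
        for (int i = 0; i < pg.index; i++)
            printf("ip = %#lx\n", pg.records[i].ip);
        return 0;
    }
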