Lines Matching defs:from

84 /* Active memory cgroup to use from an interrupt context */
186 spinlock_t lock; /* for from, to */
188 struct mem_cgroup *from;
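The hits at 186/188 come from the move-charge bookkeeping singleton that the later mc.from/mc.to matches refer to. For orientation, a minimal sketch of that structure, assuming the conventional layout of the mc singleton in mm/memcontrol.c (field set abbreviated, not a verbatim copy):

    /* Move-charge state that the mc.from/mc.to hits below refer to. */
    static struct move_charge_struct {
        spinlock_t        lock;          /* for from, to */
        struct mm_struct  *mm;           /* mm whose charges are being moved */
        struct mem_cgroup *from;         /* source memcg of the move */
        struct mem_cgroup *to;           /* destination memcg */
        unsigned long     flags;         /* what to move (anon/file) */
        unsigned long     precharge;     /* charges taken up front */
        unsigned long     moved_charge;  /* pages already moved */
        unsigned long     moved_swap;    /* swap entries already moved */
        struct task_struct *moving_task; /* task performing the move */
        wait_queue_head_t waitq;         /* waiters blocked on the move */
    } mc = {
        .lock  = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
    };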
284 * 3) CPU1: a process from another memcg is allocating something,
394 * the cgroup inode from getting torn down and potentially reallocated a moment
550 goto done; /* Nothing to reclaim from */
1005 * Callers from rmap rely on disabled preemption because they never
1006 * update their counters from in-interrupt context. For these two
1007 * counters we check that the update is never performed from an
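The 1005-1007 hits belong to a comment in the per-memcg stat update path about which counters may never be touched from interrupt context. A hedged sketch of the kind of debug check it describes; the helper name and the exact counter list are illustrative assumptions:

    /*
     * Hedged sketch: the rmap-maintained counters are updated with
     * preemption disabled and must never be updated from interrupt
     * context, which is what the CONFIG_DEBUG_VM check asserts.
     */
    static void memcg_stats_check_context(enum node_stat_item idx)
    {
        if (!IS_ENABLED(CONFIG_DEBUG_VM))
            return;

        switch (idx) {
        case NR_ANON_MAPPED:
        case NR_FILE_MAPPED:
            /* updated via rmap, task context only */
            WARN_ON_ONCE(!in_task());
            break;
        default:
            break;
        }
    }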
1164 /* from time_after() in jiffies.h */
1228 * @mm: mm from which memcg should be extracted. It can be NULL.
1354 * is called from a work queue, and by busy-waiting we
1374 * one group and restart from the beginning.
1396 * it to avoid reclaiming from the same cgroup twice.
1429 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1437 mz = from->nodeinfo[nid];
1457 * dead_memcg from cgroup root separately.
1595 * to or just after a page is removed from an lru list.
1658 * Checking whether a cgroup is mc.from or mc.to or under the hierarchy of
1664 struct mem_cgroup *from;
1668 * Unlike task_move routines, we access mc.to, mc.from not under
1672 from = mc.from;
1674 if (!from)
1677 ret = mem_cgroup_is_descendant(from, memcg) ||
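The 1658-1677 hits outline the helper that tests whether a memcg is involved in an ongoing charge move. A sketch reconstructed around those matched lines, assuming the to-side check mirrors the from-side and that mc.lock is the lock mentioned at line 186:

    static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
    {
        struct mem_cgroup *from, *to;
        bool ret = false;

        /*
         * Unlike task_move routines, we access mc.to, mc.from not under
         * mutual exclusion by cgroup_mutex; take mc.lock instead.
         */
        spin_lock(&mc.lock);
        from = mc.from;
        to = mc.to;
        if (!from)
            goto unlock;

        ret = mem_cgroup_is_descendant(from, memcg) ||
              mem_cgroup_is_descendant(to, memcg);
    unlock:
        spin_unlock(&mc.lock);
        return ret;
    }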
1935 /* Calculate swap excess capacity from memsw limit */
2014 * coming back to reclaim from this cgroup
2185 * victim and then we have to bail out from the charge path.
2304 * Traverse the memory cgroup hierarchy from the victim task's
2335 * This function prevents unlocked LRU folios from being moved to
2457 * @memcg: memcg to consume from.
2520 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2580 * as well as workers from this path always operate on the local
2793 * Reclaims memory over the high limit. Called directly from
2794 * try_charge() (context permitting), as well as from the userland
2830 * This is distinct from memory.max or page allocator behaviour because
3073 * Reclaim is set up above to be called from the userland
3273 * Release the objcg pointer from the previous iteration,
3383 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3688 * to allow one less page to be charged, but we can't rely
3836 * @from: mem_cgroup which the entry is moved from
3840 * as the mem_cgroup's id of @from.
3848 struct mem_cgroup *from, struct mem_cgroup *to)
3852 old_id = mem_cgroup_id(from);
3856 mod_memcg_state(from, MEMCG_SWAP, -1);
3864 struct mem_cgroup *from, struct mem_cgroup *to)
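The 3836-3864 hits are from the swap-accounting handover: the per-swap-entry owner record is switched from @from's id to @to's id, and MEMCG_SWAP is adjusted on success. A sketch consistent with those fragments; the use of swap_cgroup_cmpxchg() for the record update is an assumption about the surrounding API:

    static int mem_cgroup_move_swap_account(swp_entry_t entry,
                                            struct mem_cgroup *from,
                                            struct mem_cgroup *to)
    {
        unsigned short old_id, new_id;

        old_id = mem_cgroup_id(from);
        new_id = mem_cgroup_id(to);

        /* Re-tag the swap entry only if it still belongs to @from. */
        if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
            mod_memcg_state(from, MEMCG_SWAP, -1);
            mod_memcg_state(to, MEMCG_SWAP, 1);
            return 0;
        }
        return -EINVAL;
    }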
3972 * If we failed to reclaim anything from this memory cgroup
3985 * memory to reclaim from. Consider this as a longer
4009 * Reclaims as many pages from the given memcg as possible.
4071 * Approximate root's usage from global state. This isn't
4627 * Iterate backward over the array of thresholds starting from
4639 * Iterate forward over the array of thresholds starting from
4984 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
5183 * Gets called from workqueue.
5556 * those dead CSS from occupying IDs, or we might quickly exhaust the
5566 * those references are manageable from userspace.
5602 * mem_cgroup_from_id - look up a memcg from a memcg id
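A brief sketch of the lookup named by the 5602 hit, assuming the usual IDR-backed id space (mem_cgroup_idr) and an RCU read-side caller:

    struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
    {
        WARN_ON_ONCE(!rcu_read_lock_held());
        return idr_find(&mem_cgroup_idr, id);
    }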
6186 * @from: mem_cgroup which the folio is moved from.
6187 * @to: mem_cgroup which the folio is moved to. @from != @to.
6192 * from old cgroup.
6196 struct mem_cgroup *from,
6204 VM_BUG_ON(from == to);
6210 if (folio_memcg(folio) != from)
6214 from_vec = mem_cgroup_lruvec(from, pgdat);
6283 css_put(&from->css);
6287 __folio_memcg_unlock(from);
6295 mem_cgroup_charge_statistics(from, -nr_pages);
6296 memcg_check_events(from, nid);
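The 6186-6296 hits come from the folio move path itself. A condensed skeleton showing the order of operations those fragments imply; the per-counter stat transfer between from_vec and to_vec is elided, so this is a sketch rather than the file's full function:

    static int mem_cgroup_move_account(struct folio *folio, bool compound,
                                       struct mem_cgroup *from,
                                       struct mem_cgroup *to)
    {
        struct lruvec *from_vec, *to_vec;
        pg_data_t *pgdat = folio_pgdat(folio);
        unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
        int nid, ret;

        VM_BUG_ON(from == to);
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        ret = -EINVAL;
        if (folio_memcg(folio) != from)
            goto out;

        from_vec = mem_cgroup_lruvec(from, pgdat);
        to_vec = mem_cgroup_lruvec(to, pgdat);

        folio_memcg_lock(folio);
        /* ... move the relevant NR_* counters from from_vec to to_vec ... */

        /* Hand the folio over: pin @to, repoint memcg_data, unpin @from. */
        css_get(&to->css);
        css_put(&from->css);
        folio->memcg_data = (unsigned long)to;
        __folio_memcg_unlock(from);

        ret = 0;
        nid = folio_nid(folio);

        local_irq_disable();
        mem_cgroup_charge_statistics(to, nr_pages);
        memcg_check_events(to, nid);
        mem_cgroup_charge_statistics(from, -nr_pages);
        memcg_check_events(from, nid);
        local_irq_enable();
    out:
        return ret;
    }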
6336 * from other swap handling below.
6375 if (folio_memcg(folio) == mc.from) {
6394 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6425 if (folio_memcg(folio) == mc.from) {
6507 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6510 struct mem_cgroup *from = mc.from;
6513 /* we must uncharge all the leftover precharges from mc.to */
6519 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6523 mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6528 /* uncharge swap account from the old cgroup */
6529 if (!mem_cgroup_is_root(mc.from))
6530 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6532 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6543 memcg_oom_recover(from);
6559 mc.from = NULL;
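The 6507-6559 hits belong to the cleanup that cancels leftover precharges once a charge move finishes or aborts. A simplified sketch of the parts those hits cover (the surrounding mem_cgroup_clear_mc(), which resets mc.from/mc.to under mc.lock, is omitted):

    static void __mem_cgroup_clear_mc(void)
    {
        struct mem_cgroup *from = mc.from;
        struct mem_cgroup *to = mc.to;

        /* we must uncharge all the leftover precharges from mc.to */
        if (mc.precharge) {
            mem_cgroup_cancel_charge(mc.to, mc.precharge);
            mc.precharge = 0;
        }
        /*
         * we didn't uncharge from mc.from at mem_cgroup_move_account(),
         * so we must uncharge it here.
         */
        if (mc.moved_charge) {
            mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
            mc.moved_charge = 0;
        }
        if (mc.moved_swap) {
            /* uncharge swap account from the old cgroup */
            if (!mem_cgroup_is_root(mc.from))
                page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

            mem_cgroup_id_put_many(mc.from, mc.moved_swap);

            /* both to->memory and to->memsw were charged; drop to->memory */
            if (!mem_cgroup_is_root(mc.to))
                page_counter_uncharge(&mc.to->memory, mc.moved_swap);

            mc.moved_swap = 0;
        }
        memcg_oom_recover(from);
        memcg_oom_recover(to);
        wake_up_all(&mc.waitq);
    }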
6571 struct mem_cgroup *from;
6605 from = mem_cgroup_from_task(p);
6607 VM_BUG_ON(from == memcg);
6614 VM_BUG_ON(mc.from);
6622 mc.from = from;
6666 mc.from, mc.to)) {
6677 mc.from, mc.to)) {
6717 mc.from, mc.to)) {
6719 /* we uncharge from mc.from later. */
6730 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6772 atomic_inc(&mc.from->moving_account);
6793 atomic_dec(&mc.from->moving_account);
7321 * protection which is derived from its own memory.min/low, its
7346 * budget is NOT proportional. A cgroup's protection from a sibling
7349 * 5. However, to allow protecting recursive subtrees from each other
7352 * "floating" - protection from up the tree is distributed in
7356 * subtree as a whole from neighboring subtrees.
7394 * shielded from reclaim, but we do return a smaller value for
7408 * collectively protected from neighboring trees.
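The 7321-7408 hits are from the long comment over the effective memory.min/low calculation. A hedged sketch of the proportional scaling that comment describes; the helper name and reduced parameter list are illustrative, not the file's actual function:

    /*
     * When the children's combined protection claims exceed what the
     * parent can pass down, each child's claim is scaled by its share.
     */
    static unsigned long scaled_protection(unsigned long usage,
                                           unsigned long setting,
                                           unsigned long parent_effective,
                                           unsigned long siblings_protected)
    {
        unsigned long protected = min(usage, setting);

        if (siblings_protected > parent_effective)
            return protected * parent_effective / siblings_protected;
        return protected;
    }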
7524 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7651 /* drop reference from uncharge_folio */
7793 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7797 * Transfer the memcg data from the old folio to the new folio for migration.
7829 * to be removed from the split queue now, to avoid getting an incorrect
7834 * removing from the split queue a bit earlier seems reasonable.
7946 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7949 * should be initialized from here.