This article collects typical usage examples of the `zone_page_state` function from the Linux kernel (C code). If you are unsure what `zone_page_state` does, how to call it, or where it is used, the curated examples below should help.
In total, 12 code examples of `zone_page_state` are shown, ordered by popularity.
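For orientation: `zone_page_state(zone, item)` reads the per-zone VM statistic `item` maintained in `zone->vm_stat`. Before the examples, here is a minimal sketch of the common call pattern, assuming a kernel of roughly the same vintage as the examples below (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical helper: total free pages across all populated zones. */
static unsigned long total_free_pages_example(void)
{
    struct zone *zone;
    unsigned long free = 0;

    for_each_populated_zone(zone)
        free += zone_page_state(zone, NR_FREE_PAGES);
    return free;
}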
Example 1: tune_lmk_zone_param
void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
                         int *other_free, int *other_file)
{
    struct zone *zone;
    struct zoneref *zoneref;
    int zone_idx;

    for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
        zone_idx = zonelist_zone_idx(zoneref);
        if (zone_idx == ZONE_MOVABLE) {
            continue;
        }
        if (zone_idx > classzone_idx) {
            if (other_free != NULL)
                *other_free -= zone_page_state(zone,
                                               NR_FREE_PAGES);
            if (other_file != NULL)
                *other_file -= zone_page_state(zone,
                                               NR_FILE_PAGES)
                             - zone_page_state(zone, NR_SHMEM);
        } else if (zone_idx < classzone_idx) {
            if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) {
                *other_free -=
                    zone->lowmem_reserve[classzone_idx];
            } else {
                *other_free -=
                    zone_page_state(zone, NR_FREE_PAGES);
            }
        }
    }
}
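This helper discounts memory that an allocation constrained to `classzone_idx` cannot actually use, so low-memory-killer thresholds compare like with like. A hedged caller sketch (the zonelist lookup, GFP flags, and function name below are illustrative, not taken from the original driver):

/* Hypothetical caller: derive LMK counters for a GFP_KERNEL allocation. */
static void lmk_counters_example(void)
{
    struct zonelist *zonelist = node_zonelist(0, GFP_KERNEL);
    int other_free = global_page_state(NR_FREE_PAGES);
    int other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM);

    tune_lmk_zone_param(zonelist, gfp_zone(GFP_KERNEL),
                        &other_free, &other_file);
}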
Example 2: highmem_dirtyable_memory
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
    int node;
    unsigned long x = 0;

    for_each_node_state(node, N_HIGH_MEMORY) {
        struct zone *z =
            &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

        x += zone_page_state(z, NR_FREE_PAGES)
           + zone_page_state(z, NR_INACTIVE)
           + zone_page_state(z, NR_ACTIVE);
    }
    /*
     * Make sure that the number of highmem pages is never larger
     * than the number of the total dirtyable memory. This can only
     * occur in very strange VM situations but we want to make sure
     * that this does not occur.
     */
    return min(x, total);
#else
    return 0;
#endif
}
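This helper is used by the dirty-page throttling code: when highmem is not allowed to hold dirty pages (`vm_highmem_is_dirtyable` is 0), its pages are subtracted from the dirtyable total. Roughly how `mm/page-writeback.c` of the same vintage consumed it; a paraphrase that varies by kernel version, so treat it as a sketch:

static unsigned long determine_dirtyable_memory(void)
{
    unsigned long x;

    x = global_page_state(NR_FREE_PAGES) +
        global_page_state(NR_INACTIVE) +
        global_page_state(NR_ACTIVE);
    if (!vm_highmem_is_dirtyable)
        x -= highmem_dirtyable_memory(x);
    return x + 1;  /* Ensure that we never return 0 */
}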
Example 3: nr_free_highpages
unsigned int nr_free_highpages(void)
{
    pg_data_t *pgdat;
    unsigned int pages = 0;

    for_each_online_pgdat(pgdat) {
        pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
                                 NR_FREE_PAGES);
        if (zone_movable_is_highmem())
            pages += zone_page_state(
                &pgdat->node_zones[ZONE_MOVABLE],
                NR_FREE_PAGES);
    }
    return pages;
}
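`zone_movable_is_highmem()` answers whether ZONE_MOVABLE was carved out of highmem at boot, in which case its free pages also count as high pages. Paraphrased from `include/linux/mmzone.h` of kernels in this range; the exact config guards vary by version:

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
    return movable_zone == ZONE_HIGHMEM;
#else
    return 0;
#endif
}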
Example 4: max_pages
static unsigned long max_pages(unsigned long min_pages)
{
    unsigned long node_free_pages, max;
    struct zone *zones = NODE_DATA(numa_node_id())->node_zones;

    node_free_pages =
#ifdef CONFIG_ZONE_DMA
        zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
        zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
        zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

    max = node_free_pages / FRACTION_OF_NODE_MEM;
    return max(max, min_pages);
}
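`max_pages` caps a consumer at a fraction of the local node's lowmem (DMA, DMA32, and Normal zones only) while guaranteeing a floor of `min_pages`. A quick worked example under assumed values (all numbers hypothetical; `FRACTION_OF_NODE_MEM` is defined near this function in its source file):

/*
 * Suppose the local node reports 262144 free lowmem pages (1 GiB with
 * 4 KiB pages) and FRACTION_OF_NODE_MEM is 16:
 *
 *     max = 262144 / 16 = 16384 pages (64 MiB)
 *
 * max_pages(32) then returns 16384; on a tiny node where the division
 * yields fewer than 32 pages, the min_pages floor wins instead.
 */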
Example 5: zone_nr_free_pages
/* Called when a more accurate view of NR_FREE_PAGES is needed */
unsigned long zone_nr_free_pages(struct zone *zone)
{
    unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);

    /*
     * While kswapd is awake, it is considered the zone is under some
     * memory pressure. Under pressure, there is a risk that
     * per-cpu-counter-drift will allow the min watermark to be breached
     * potentially causing a live-lock. While kswapd is awake and
     * free pages are low, get a better estimate for free pages
     */
    if (nr_free_pages < zone->percpu_drift_mark &&
        !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
        return zone_page_state_snapshot(zone, NR_FREE_PAGES);

    return nr_free_pages;
}
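`zone_page_state` is cheap but can lag reality: each CPU accumulates counter updates in a per-cpu delta (`vm_stat_diff`) and folds them into `zone->vm_stat` only in batches. The snapshot variant folds the pending deltas in on demand. Paraphrased from `include/linux/vmstat.h` of kernels in this range; field layout varies slightly across versions:

static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                                     enum zone_stat_item item)
{
    long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
    int cpu;

    /* Fold in the not-yet-flushed per-cpu deltas. */
    for_each_online_cpu(cpu)
        x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

    if (x < 0)
        x = 0;
#endif
    return x;
}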
Example 6: bad_memory_status
int bad_memory_status(void)
{
    struct zone *zone;
    unsigned long free_pages, min_pages;

    for_each_populated_zone(zone) {
        if (!strcmp(zone->name, "Normal")) {
            free_pages = zone_page_state(zone, NR_FREE_PAGES);
            min_pages = min_wmark_pages(zone);
            if (free_pages < (min_pages + HIB_PAGE_FREE_DELTA)) {
                hib_warn("abort hibernate due to %s memory status: (%lu:%lu)\n",
                         zone->name, free_pages, min_pages);
                return -1;
            } else {
                hib_warn("%s memory status: (%lu:%lu)\n",
                         zone->name, free_pages, min_pages);
            }
        }
    }
    return 0;
}
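`min_wmark_pages(zone)` is the zone's minimum watermark, below which only the most privileged allocations may dip; the hibernation driver refuses to proceed unless Normal-zone free pages exceed it by a safety delta. In post-2.6.30 kernels the macro is a one-liner in `include/linux/mmzone.h`:

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])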
Example 7: lowmem_shrink
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
    struct task_struct *tsk;
    struct task_struct *selected = NULL;
    int rem = 0;
    int tasksize;
    int i;
    short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
    int minfree = 0;
    int selected_tasksize = 0;
    short selected_oom_score_adj;
    int array_size = ARRAY_SIZE(lowmem_adj);
    int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
    int other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM);
    int print_extra_info = 0;
    static unsigned long lowmem_print_extra_info_timeout = 0;
#ifdef CONFIG_MTK_GMO_RAM_OPTIMIZE
    int other_anon = global_page_state(NR_INACTIVE_ANON) -
                     global_page_state(NR_ACTIVE_ANON);
#endif
#ifdef CONFIG_MT_ENG_BUILD
    /* dump memory info when the framework hits low memory */
    int pid_dump = -1;  /* pid of the process that needs to be dumped */
    // int pid_sec_mem = -1;
    int max_mem = 0;
    static int pid_flm_warn = -1;
    static unsigned long flm_warn_timeout = 0;
#endif /* CONFIG_MT_ENG_BUILD */

    /*
     * If we already have a death outstanding, then
     * bail out right away; indicating to vmscan
     * that we have nothing further to offer on
     * this pass.
     */
    if (lowmem_deathpending &&
        time_before_eq(jiffies, lowmem_deathpending_timeout))
        return -1;

    /* We are in MTKPASR stage! */
    if (unlikely(current->flags & PF_MTKPASR)) {
        return -1;
    }

    if (!spin_trylock(&lowmem_shrink_lock)) {
        lowmem_print(4, "lowmem_shrink lock failed\n");
        return -1;
    }

#ifdef CONFIG_ZRAM
    other_file -= total_swapcache_pages();
#endif

#ifdef CONFIG_HIGHMEM
    /*
     * Check whether this is caused by low memory in the normal zone!
     * This helps avoid over-reclaiming when total free pages are
     * plentiful but the normal zone is under low memory.
     */
    if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
        int nid;
        struct zone *z;

        /* Restore other_free */
        other_free += totalreserve_pages;

        /* Go through all memory nodes & subtract (free, file) from ZONE_HIGHMEM */
        for_each_online_node(nid) {
            z = &NODE_DATA(nid)->node_zones[ZONE_HIGHMEM];
            other_free -= zone_page_state(z, NR_FREE_PAGES);
            other_file -= zone_page_state(z, NR_FILE_PAGES);
            /* Don't subtract NR_SHMEM twice! */
            other_file += zone_page_state(z, NR_SHMEM);
            /* Subtract high watermark of normal zone */
            z = &NODE_DATA(nid)->node_zones[ZONE_NORMAL];
            other_free -= high_wmark_pages(z);
        }

        /* Normalize */
        other_free *= total_low_ratio;
        other_file *= total_low_ratio;
    }
//......... remaining code omitted .........
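The CONFIG_HIGHMEM branch above rescales the LMK counters to account for memory a normal-zone allocation cannot use. A worked illustration under assumed numbers (all values hypothetical; `total_low_ratio` is a MediaTek-specific scaling factor defined elsewhere in this driver):

/*
 * Assume one node: 100000 total free pages, of which 60000 sit in
 * ZONE_HIGHMEM, and the normal zone's high watermark is 5000 pages.
 *
 *     other_free = 100000 - 60000 - 5000 = 35000 lowmem pages
 *
 * Multiplying by total_low_ratio (e.g. a total-to-lowmem ratio) then
 * puts the value back on the same scale as the lowmem_minfree
 * thresholds, which were tuned against total memory.
 */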
Example 8: lowmem_shrink
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
    struct task_struct *p;
    struct task_struct *selected = NULL;
    int rem = 0;
    int tasksize;
    int i;
    int min_adj = OOM_ADJUST_MAX + 1;
    int selected_tasksize = 0;
    int selected_oom_adj;
    int array_size = ARRAY_SIZE(lowmem_adj);
    int other_free = global_page_state(NR_FREE_PAGES);
    int other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM);
    int lru_file = global_page_state(NR_ACTIVE_FILE) +
                   global_page_state(NR_INACTIVE_FILE);
    struct zone *zone;
    int fork_boost = 0;
    size_t minfree_boosted[6] = {0, 0, 0, 0, 0, 0};
    size_t *min_array;
    int *adj_array;

    if (offlining) {
        /* Discount all free space in the section being offlined */
        for_each_zone(zone) {
            if (zone_idx(zone) == ZONE_MOVABLE) {
                other_free -= zone_page_state(zone,
                                              NR_FREE_PAGES);
                lowmem_print(4, "lowmem_shrink discounted "
                             "%lu pages in movable zone\n",
                             zone_page_state(zone, NR_FREE_PAGES));
            }
        }
    }

    /*
     * If we already have a death outstanding, then
     * bail out right away; indicating to vmscan
     * that we have nothing further to offer on
     * this pass.
     */
    if (lowmem_deathpending &&
        time_before_eq(jiffies, lowmem_deathpending_timeout)) {
        dump_deathpending(lowmem_deathpending);
        return 0;
    }

    if (lowmem_fork_boost &&
        time_before_eq(jiffies, lowmem_fork_boost_timeout)) {
        for (i = 0; i < lowmem_minfree_size; i++)
            minfree_boosted[i] = lowmem_minfree[i] + lowmem_fork_boost_minfree[i];
        /* Switch to fork_boost adj/minfree within boost_duration */
        adj_array = fork_boost_adj;
        min_array = minfree_boosted;
    } else {
        adj_array = lowmem_adj;
        min_array = lowmem_minfree;
    }

#ifdef CONFIG_SWAP
    if (fudgeswap != 0) {
        struct sysinfo si;

        si_swapinfo(&si);
        if (si.freeswap > 0) {
            if (fudgeswap > si.freeswap)
                other_file += si.freeswap;
            else
                other_file += fudgeswap;
        }
    }
#endif

    if (lowmem_adj_size < array_size)
        array_size = lowmem_adj_size;
    if (lowmem_minfree_size < array_size)
        array_size = lowmem_minfree_size;

    for (i = 0; i < array_size; i++) {
        if (other_free < min_array[i]) {
            if (other_file < min_array[i] ||
                (lowmem_check_filepages &&
                 (lru_file < min_array[i]))) {
                min_adj = adj_array[i];
                fork_boost = lowmem_fork_boost_minfree[i];
                break;
            }
        }
    }

    if (sc->nr_to_scan > 0)
        lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
                     sc->nr_to_scan, sc->gfp_mask, other_free, other_file,
                     min_adj);

    rem = global_page_state(NR_ACTIVE_ANON) +
          global_page_state(NR_ACTIVE_FILE) +
          global_page_state(NR_INACTIVE_ANON) +
          global_page_state(NR_INACTIVE_FILE);
    if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
        lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                     sc->nr_to_scan, sc->gfp_mask, rem);
//......... remaining code omitted .........
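All of the `lowmem_shrink` variants in this article share this tier-selection loop: the first tier whose threshold exceeds both the free-page and file-page counters sets the minimum adj that may be killed on this pass. A worked sketch with hypothetical table values:

/* Hypothetical tiers: adj_array_ex[i] becomes killable once both
 * counters drop below min_array_ex[i] pages. */
static int    adj_array_ex[] = { 0, 1, 6, 12 };
static size_t min_array_ex[] = { 1536, 2048, 4096, 8192 };

/*
 * With other_free = 3000 and other_file = 3500:
 *   i = 0, 1: other_free is not below 1536 or 2048 -> no match
 *   i = 2:    3000 < 4096 and 3500 < 4096          -> min_adj = 6
 * So only tasks with oom adj >= 6 are candidates on this pass.
 */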
Example 9: lowmem_shrink
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
    struct task_struct *tsk;
    struct task_struct *selected = NULL;
    int rem = 0;
    int tasksize;
    int i;
    int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
    int selected_tasksize = 0;
    int selected_oom_score_adj;
    int array_size = ARRAY_SIZE(lowmem_adj);
    int other_free = global_page_state(NR_FREE_PAGES);
    int other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM);
    int lru_file = global_page_state(NR_ACTIVE_FILE) +
                   global_page_state(NR_INACTIVE_FILE);
    struct zone *zone;

    if (offlining) {
        /* Discount all free space in the section being offlined */
        for_each_zone(zone) {
            if (zone_idx(zone) == ZONE_MOVABLE) {
                other_free -= zone_page_state(zone,
                                              NR_FREE_PAGES);
                lowmem_print(4, "lowmem_shrink discounted "
                             "%lu pages in movable zone\n",
                             zone_page_state(zone, NR_FREE_PAGES));
            }
        }
    }

#ifdef CONFIG_SWAP
    if (fudgeswap != 0) {
        struct sysinfo si;

        si_swapinfo(&si);
        if (si.freeswap > 0) {
            if (fudgeswap > si.freeswap)
                other_file += si.freeswap;
            else
                other_file += fudgeswap;
        }
    }
#endif

    if (lowmem_adj_size < array_size)
        array_size = lowmem_adj_size;
    if (lowmem_minfree_size < array_size)
        array_size = lowmem_minfree_size;

    for (i = 0; i < array_size; i++) {
        if (other_free < lowmem_minfree[i]) {
            if (other_file < lowmem_minfree[i] ||
                (lowmem_check_filepages &&
                 (lru_file < lowmem_minfile[i]))) {
                min_score_adj = lowmem_adj[i];
                break;
            }
        }
    }

    if (sc->nr_to_scan > 0)
        lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
                     sc->nr_to_scan, sc->gfp_mask, other_free, other_file,
                     min_score_adj);

    rem = global_page_state(NR_ACTIVE_ANON) +
          global_page_state(NR_ACTIVE_FILE) +
          global_page_state(NR_INACTIVE_ANON) +
          global_page_state(NR_INACTIVE_FILE);
    if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
        lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                     sc->nr_to_scan, sc->gfp_mask, rem);
        return rem;
    }

    selected_oom_score_adj = min_score_adj;

    rcu_read_lock();
    for_each_process(tsk) {
        struct task_struct *p;
        int oom_score_adj;

        if (tsk->flags & PF_KTHREAD)
            continue;

        p = find_lock_task_mm(tsk);
        if (!p)
            continue;

        if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
            time_before_eq(jiffies, lowmem_deathpending_timeout)) {
            task_unlock(p);
            rcu_read_unlock();
            return 0;
        }

        oom_score_adj = p->signal->oom_score_adj;
        if (oom_score_adj < min_score_adj) {
            task_unlock(p);
            continue;
        }
        tasksize = get_mm_rss(p->mm);
        task_unlock(p);
//......... remaining code omitted .........
Example 10: lowmem_shrink
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
    struct task_struct *tsk;
    struct task_struct *selected = NULL;
    int rem = 0;
    int tasksize;
    int i;
    int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
    int minfree = 0;
    int selected_tasksize = 0;
    int selected_oom_score_adj;
    int array_size = ARRAY_SIZE(lowmem_adj);
    int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
    int other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM);
#ifdef CONFIG_MT_ENG_BUILD
    int print_extra_info = 0;
    static unsigned long lowmem_print_extra_info_timeout = 0;
    /* dump memory info when the framework hits low memory */
    int pid_dump = -1;  /* pid of the process that needs to be dumped */
    int pid_sec_mem = -1;
    int max_mem = 0;
#endif /* CONFIG_MT_ENG_BUILD */

    /*
     * If we already have a death outstanding, then
     * bail out right away; indicating to vmscan
     * that we have nothing further to offer on
     * this pass.
     */
    if (lowmem_deathpending &&
        time_before_eq(jiffies, lowmem_deathpending_timeout))
        return 0;

#ifdef CONFIG_MT_ENG_BUILD
    add_kmem_status_lmk_counter();
#endif

#ifdef CONFIG_SWAP
    other_file -= total_swapcache_pages;
#endif

#ifdef CONFIG_HIGHMEM
    /*
     * Check whether this is caused by low memory in the normal zone!
     * This helps avoid over-reclaiming when total free pages are
     * plentiful but the normal zone is under low memory.
     */
    if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
        int nid;
        struct zone *z;

        /* Go through all memory nodes & subtract (free, file) from ZONE_HIGHMEM */
        for_each_online_node(nid) {
            z = &NODE_DATA(nid)->node_zones[ZONE_HIGHMEM];
            other_free -= zone_page_state(z, NR_FREE_PAGES);
            other_file -= zone_page_state(z, NR_FILE_PAGES);
            /* Don't subtract it twice! */
            other_file += zone_page_state(z, NR_SHMEM);
        }
        other_free *= total_low_ratio;
        other_file *= total_low_ratio;
    }
//......... remaining code omitted .........
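Note the bare `total_swapcache_pages` here, versus the `total_swapcache_pages()` call in Example 7: older kernels exported the count as a variable, and it later became a function (around Linux 3.9, when each swap type got its own address_space). A compatibility shim of the kind sometimes seen in out-of-tree builds of this driver, shown as an illustration rather than vendor code:

#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
#define TOTAL_SWAPCACHE_PAGES total_swapcache_pages()
#else
#define TOTAL_SWAPCACHE_PAGES total_swapcache_pages
#endif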
Example 11: lowmem_shrink
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
    struct task_struct *tsk;
#ifdef ENHANCED_LMK_ROUTINE
    struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
    struct task_struct *selected = NULL;
#endif
    int rem = 0;
    int tasksize;
    int i;
    int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef ENHANCED_LMK_ROUTINE
    int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
    int selected_oom_score_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
    int all_selected_oom = 0;
    int max_selected_oom_idx = 0;
#else
    int selected_tasksize = 0;
    int selected_oom_score_adj;
#endif
#ifdef CONFIG_SAMP_HOTNESS
    int selected_hotness_adj = 0;
#endif
    int array_size = ARRAY_SIZE(lowmem_adj);
    int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
    int other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM);
    struct zone *zone;

#if defined(CONFIG_ZRAM_FOR_ANDROID) || defined(CONFIG_ZSWAP)
    other_file -= total_swapcache_pages;
#endif

    if (offlining) {
        /* Discount all free space in the section being offlined */
        for_each_zone(zone) {
            if (zone_idx(zone) == ZONE_MOVABLE) {
                other_free -= zone_page_state(zone,
                                              NR_FREE_PAGES);
                lowmem_print(4, "lowmem_shrink discounted "
                             "%lu pages in movable zone\n",
                             zone_page_state(zone, NR_FREE_PAGES));
            }
        }
    }

    /*
     * If we already have a death outstanding, then
     * bail out right away; indicating to vmscan
     * that we have nothing further to offer on
     * this pass.
     *
     * Note: Currently you need CONFIG_PROFILING
     * for this to work correctly.
     */
#ifdef ENHANCED_LMK_ROUTINE
    for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
        if (lowmem_deathpending[i] &&
            time_before_eq(jiffies, lowmem_deathpending_timeout))
            return 0;
    }
#else
    if (lowmem_deathpending &&
        time_before_eq(jiffies, lowmem_deathpending_timeout))
        return 0;
#endif

    if (lowmem_adj_size < array_size)
        array_size = lowmem_adj_size;
    if (lowmem_minfree_size < array_size)
        array_size = lowmem_minfree_size;

    for (i = 0; i < array_size; i++) {
        if (other_free < lowmem_minfree[i] &&
            other_file < lowmem_minfree[i]) {
            min_score_adj = lowmem_adj[i];
            break;
        }
    }

    if (sc->nr_to_scan > 0)
        lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
                     sc->nr_to_scan, sc->gfp_mask, other_free,
                     other_file, min_score_adj);

    rem = global_page_state(NR_ACTIVE_ANON) +
          global_page_state(NR_ACTIVE_FILE) +
          global_page_state(NR_INACTIVE_ANON) +
          global_page_state(NR_INACTIVE_FILE);
    if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
        lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                     sc->nr_to_scan, sc->gfp_mask, rem);
        return rem;
    }

#ifdef ENHANCED_LMK_ROUTINE
    for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++)
        selected_oom_score_adj[i] = min_score_adj;
#else
    selected_oom_score_adj = min_score_adj;
#endif

#ifdef CONFIG_ZRAM_FOR_ANDROID
//......... remaining code omitted .........
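With `ENHANCED_LMK_ROUTINE`, the driver can select up to `LOWMEM_DEATHPENDING_DEPTH` victims per pass instead of one, and `max_selected_oom_idx` tracks which filled slot should be replaced first. A purely illustrative helper in the same spirit (a paraphrase of the idea, not the vendor code):

/* Illustrative: return the slot holding the weakest candidate (the
 * smallest oom_score_adj), which a better candidate may displace. */
static int weakest_selected_idx(int *sel_adj, int depth)
{
    int i, idx = 0;

    for (i = 1; i < depth; i++)
        if (sel_adj[i] < sel_adj[idx])
            idx = i;
    return idx;
}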
Example 12: lowmem_shrink
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
    struct task_struct *tsk;
    struct task_struct *selected = NULL;
    int rem = 0;
    int tasksize;
    int i;
    int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
    int selected_tasksize = 0;
    int selected_oom_score_adj;
    int selected_oom_adj = 0;
    int array_size = ARRAY_SIZE(lowmem_adj);
    int other_free;
    int other_file;
    int reserved_free = 0;
    int cma_free = 0;
    unsigned long nr_to_scan = sc->nr_to_scan;
    struct zone *zone;
    int use_cma = can_use_cma_pages(sc->gfp_mask);

    if (nr_to_scan > 0) {
        if (!mutex_trylock(&scan_mutex)) {
            if (!(lowmem_only_kswapd_sleep && !current_is_kswapd())) {
                msleep_interruptible(lowmem_sleep_ms);
            }
            return 0;
        }
    }

    for_each_zone(zone) {
        if (is_normal(zone))
            reserved_free = zone->watermark[WMARK_MIN] +
                            zone->lowmem_reserve[_ZONE];
        cma_free += zone_page_state(zone, NR_FREE_CMA_PAGES);
    }

    other_free = global_page_state(NR_FREE_PAGES);

    if (global_page_state(NR_SHMEM) + global_page_state(NR_MLOCK) +
        total_swapcache_pages <
        global_page_state(NR_FILE_PAGES))
        other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM) -
                     global_page_state(NR_MLOCK) -
                     total_swapcache_pages;
    else
        other_file = 0;

    if (lowmem_adj_size < array_size)
        array_size = lowmem_adj_size;
    if (lowmem_minfree_size < array_size)
        array_size = lowmem_minfree_size;

    for (i = 0; i < array_size; i++) {
        if ((other_free - reserved_free - (use_cma ? 0 : cma_free)) < lowmem_minfree[i] &&
            other_file < lowmem_minfree[i]) {
            min_score_adj = lowmem_adj[i];
            break;
        }
    }

    if (nr_to_scan > 0)
        lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d, rfree %d\n",
                     nr_to_scan, sc->gfp_mask, other_free,
                     other_file, min_score_adj, reserved_free);

    rem = global_page_state(NR_ACTIVE_ANON) +
          global_page_state(NR_ACTIVE_FILE) +
          global_page_state(NR_INACTIVE_ANON) +
          global_page_state(NR_INACTIVE_FILE);
    if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
        lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                     nr_to_scan, sc->gfp_mask, rem);
        if (nr_to_scan > 0)
            mutex_unlock(&scan_mutex);
        return rem;
    }

    selected_oom_score_adj = min_score_adj;

    rcu_read_lock();
    for_each_process(tsk) {
        struct task_struct *p;
        int oom_score_adj;

        if (tsk->flags & PF_KTHREAD)
            continue;

        if (test_task_flag(tsk, TIF_MM_RELEASED))
            continue;

        if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
            if (test_task_flag(tsk, TIF_MEMDIE)) {
                lowmem_print(2, "skipping, waiting for process %d (%s) dead\n",
                             tsk->pid, tsk->comm);
                rcu_read_unlock();
                if (!(lowmem_only_kswapd_sleep && !current_is_kswapd())) {
                    msleep_interruptible(lowmem_sleep_ms);
                }
//......... remaining code omitted .........
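Example 12 folds two refinements into the free-page math: a `reserved_free` floor (the Normal zone's minimum watermark plus its lowmem reserve, indexed by the driver's `_ZONE` macro), and `NR_FREE_CMA_PAGES`, which is discounted whenever the triggering allocation cannot be served from CMA. A hypothetical sketch of the vendor helper's intent (the real `can_use_cma_pages` in such trees inspects the migratetype derived from gfp_mask; this is an illustration, not that code):

#include <linux/gfp.h>

/* Illustrative only: movable allocations can be placed in CMA pages,
 * so free CMA memory counts for them; all other allocations must not
 * count it. */
static int can_use_cma_pages_sketch(gfp_t gfp_mask)
{
    return (gfp_mask & __GFP_MOVABLE) ? 1 : 0;
}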