Merge branch 'pm-sleep'
* pm-sleep:
  PM / hibernate: Fix rtree_next_node() to avoid walking off list ends
  x86/power/64: Use __pa() for physical address computation
  PM / sleep: Update some system sleep documentation
@@ -835,9 +835,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
  */
 static bool rtree_next_node(struct memory_bitmap *bm)
 {
-	bm->cur.node = list_entry(bm->cur.node->list.next,
-				  struct rtree_node, list);
-	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
+		bm->cur.node = list_entry(bm->cur.node->list.next,
+					  struct rtree_node, list);
 		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
 		bm->cur.node_bit  = 0;
 		touch_softlockup_watchdog();
@@ -845,9 +845,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
 	}
 
 	/* No more nodes, goto next zone */
-	bm->cur.zone = list_entry(bm->cur.zone->list.next,
+	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
+		bm->cur.zone = list_entry(bm->cur.zone->list.next,
 				  struct mem_zone_bm_rtree, list);
-	if (&bm->cur.zone->list != &bm->zones) {
 		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 					  struct rtree_node, list);
 		bm->cur.node_pfn = 0;
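Both hunks make the same change: instead of advancing the cursor first and then comparing the new position against the list head, the code asks list_is_last() whether the current entry is already the final one, and only advances when it is not. The sketch below is a minimal, self-contained illustration of that pattern, not the kernel code itself; the list_head, list_is_last(), list_entry(), rtree_node and next_node definitions here are simplified stand-ins for the kernel's versions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal doubly-linked list, mimicking <linux/list.h> just enough to compile. */
struct list_head {
	struct list_head *next, *prev;
};

/* Same test the kernel helper performs: is @node the entry just before @head? */
static bool list_is_last(const struct list_head *node, const struct list_head *head)
{
	return node->next == head;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rtree_node {			/* simplified stand-in for the kernel struct */
	struct list_head list;
	int id;
};

/*
 * Fixed shape: test for "last node" first, and only then advance.  The
 * pre-fix code advanced first and compared the new position against the
 * list head, so on the final node the cursor briefly pointed at the head
 * itself, which is not embedded in an rtree_node at all.
 */
static bool next_node(struct rtree_node **cur, struct list_head *leaves)
{
	if (!list_is_last(&(*cur)->list, leaves)) {
		*cur = list_entry((*cur)->list.next, struct rtree_node, list);
		return true;
	}
	return false;			/* no more nodes in this zone */
}

int main(void)
{
	struct list_head leaves;
	struct rtree_node a = { .id = 0 }, b = { .id = 1 };

	/* leaves <-> a <-> b <-> leaves */
	leaves.next = &a.list;  a.list.prev = &leaves;
	a.list.next = &b.list;  b.list.prev = &a.list;
	b.list.next = &leaves;  leaves.prev = &b.list;

	struct rtree_node *cur = &a;
	do {
		printf("node %d\n", cur->id);
	} while (next_node(&cur, &leaves));

	return 0;
}

Because the check happens before the cursor moves, the cursor never aliases the list head, so callers never dereference a bare list_head as if it were an rtree_node.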