X-Git-Url: http://git.agner.ch/gitweb/?p=linux-drm-fsl-dcu.git;a=blobdiff_plain;f=arch%2Fpowerpc%2Fmm%2Fhash_low_32.S;h=ddceefc06ecc0a5554a650e5dbe690959274943a;hp=bd68df5fa78ac083bc0479946b1085c44934f5b7;hb=f697b677620d04d8c77841745727de85f7e948b1;hpb=a4b47ab9464a8200528fad3101668abdd7379cf9

diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index bd68df5fa78a..ddceefc06ecc 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -283,6 +283,7 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 #define PTEG_SIZE	64
 #define LG_PTEG_SIZE	6
 #define LDPTEu		lwzu
+#define LDPTE		lwz
 #define STPTE		stw
 #define CMPPTE		cmpw
 #define PTE_H		0x40
@@ -389,13 +390,30 @@ _GLOBAL(hash_page_patch_C)
 	 * and we know there is a definite (although small) speed
 	 * advantage to putting the PTE in the primary PTEG, we always
 	 * put the PTE in the primary PTEG.
+	 *
+	 * In addition, we skip any slot that is mapping kernel text in
+	 * order to avoid a deadlock when not using BAT mappings if
+	 * trying to hash in the kernel hash code itself after it has
+	 * already taken the hash table lock. This works in conjunction
+	 * with pre-faulting of the kernel text.
+	 *
+	 * If the hash table bucket is full of kernel text entries, we'll
+	 * lockup here but that shouldn't happen
 	 */
-	addis	r4,r7,next_slot@ha
+
+1:	addis	r4,r7,next_slot@ha		/* get next evict slot */
 	lwz	r6,next_slot@l(r4)
-	addi	r6,r6,PTE_SIZE
+	addi	r6,r6,PTE_SIZE			/* search for candidate */
 	andi.	r6,r6,7*PTE_SIZE
 	stw	r6,next_slot@l(r4)
 	add	r4,r3,r6
+	LDPTE	r0,PTE_SIZE/2(r4)		/* get PTE second word */
+	clrrwi	r0,r0,12
+	lis	r6,etext@h
+	ori	r6,r6,etext@l			/* get etext */
+	tophys(r6,r6)
+	cmpl	cr0,r0,r6			/* compare and try again */
+	blt	1b
 
 #ifndef CONFIG_SMP
 	/* Store PTE in PTEG */
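
Note: the following is a rough, illustrative C sketch of the eviction loop added by the second hunk, for readers who prefer C to PowerPC assembly. It is not kernel code: pick_victim(), pteg, next_slot and etext_phys are hypothetical names introduced here, and it assumes the classic 32-bit hash PTE layout (eight 8-byte slots per PTEG, physical page number in the upper 20 bits of the second word).

#include <stdint.h>

#define PTE_SIZE	8	/* one 32-bit hash PTE: two 32-bit words */

/*
 * Hypothetical helper mirroring the assembly above: pteg points at the
 * primary PTEG, *next_slot is the round-robin byte offset kept in the
 * next_slot variable, and etext_phys stands for tophys(etext), the
 * physical address of the end of kernel text.
 */
uint32_t *pick_victim(uint32_t *pteg, unsigned int *next_slot,
		      uint32_t etext_phys)
{
	uint32_t *slot, rpn;

	do {
		/* addi r6,r6,PTE_SIZE ; andi. r6,r6,7*PTE_SIZE ; stw */
		*next_slot = (*next_slot + PTE_SIZE) & (7 * PTE_SIZE);
		slot = (uint32_t *)((char *)pteg + *next_slot);

		/* LDPTE r0,PTE_SIZE/2(r4) ; clrrwi r0,r0,12 */
		rpn = slot[1] & ~(uint32_t)0xfff;
	} while (rpn < etext_phys);	/* cmpl cr0,r0,r6 ; blt 1b */

	return slot;	/* candidate does not map kernel text */
}

The "below etext" test (cmpl/blt in the assembly) is enough to identify kernel-text mappings here because, on these 32-bit platforms, the kernel text is loaded starting at the bottom of physical memory, so any real page number lower than tophys(etext) can only belong to kernel text.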