Reduce or add spaces to clean up code style. No functional changes here.

Signed-off-by: Xichao Zhao <zhao.xichao@xxxxxxxx>
---
 lib/raid6/sse2.c | 126 +++++++++++++++++++++++------------------------
 1 file changed, 63 insertions(+), 63 deletions(-)

diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 2930220249c9..662e0c50fc57 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -40,21 +40,21 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	int d, z, z0;
 
 	z0 = disks - 3;		/* Highest data disk */
-	p = dptr[z0+1];		/* XOR parity */
-	q = dptr[z0+2];		/* RS syndrome */
+	p = dptr[z0 + 1];	/* XOR parity */
+	q = dptr[z0 + 2];	/* RS syndrome */
 
 	kernel_fpu_begin();
 
 	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
 	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
 
-	for ( d = 0 ; d < bytes ; d += 16 ) {
+	for (d = 0; d < bytes; d += 16) {
 		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
 		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
-		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+		asm volatile("prefetchnta %0" : : "m" (dptr[z0 - 1][d]));
 		asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
-		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
-		for ( z = z0-2 ; z >= 0 ; z-- ) {
+		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0 - 1][d]));
+		for (z = z0 - 2; z >= 0; z--) {
 			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
 			asm volatile("pcmpgtb %xmm4,%xmm5");
 			asm volatile("paddb %xmm4,%xmm4");
@@ -92,19 +92,19 @@ static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
 	int d, z, z0;
 
 	z0 = stop;		/* P/Q right side optimization */
-	p = dptr[disks-2];	/* XOR parity */
-	q = dptr[disks-1];	/* RS syndrome */
+	p = dptr[disks - 2];	/* XOR parity */
+	q = dptr[disks - 1];	/* RS syndrome */
 
 	kernel_fpu_begin();
 
 	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
 
-	for ( d = 0 ; d < bytes ; d += 16 ) {
+	for (d = 0; d < bytes; d += 16) {
 		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
 		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
 		asm volatile("pxor %xmm4,%xmm2");
 		/* P/Q data pages */
-		for ( z = z0-1 ; z >= start ; z-- ) {
+		for (z = z0 - 1; z >= start; z--) {
 			asm volatile("pxor %xmm5,%xmm5");
 			asm volatile("pcmpgtb %xmm4,%xmm5");
 			asm volatile("paddb %xmm4,%xmm4");
@@ -115,7 +115,7 @@ static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
 			asm volatile("pxor %xmm5,%xmm4");
 		}
 		/* P/Q left side optimization */
-		for ( z = start-1 ; z >= 0 ; z-- ) {
+		for (z = start - 1; z >= 0; z--) {
 			asm volatile("pxor %xmm5,%xmm5");
 			asm volatile("pcmpgtb %xmm4,%xmm5");
 			asm volatile("paddb %xmm4,%xmm4");
@@ -150,8 +150,8 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	int d, z, z0;
 
 	z0 = disks - 3;		/* Highest data disk */
-	p = dptr[z0+1];		/* XOR parity */
-	q = dptr[z0+2];		/* RS syndrome */
+	p = dptr[z0 + 1];	/* XOR parity */
+	q = dptr[z0 + 2];	/* RS syndrome */
 
 	kernel_fpu_begin();
 
@@ -160,13 +160,13 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
 
 	/* We uniformly assume a single prefetch covers at least 32 bytes */
-	for ( d = 0 ; d < bytes ; d += 32 ) {
+	for (d = 0; d < bytes; d += 32) {
 		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
-		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));    /* P[0] */
-		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
+		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));      /* P[0] */
+		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d + 16])); /* P[1] */
 		asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
 		asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
-		for ( z = z0-1 ; z >= 0 ; z-- ) {
+		for (z = z0 - 1; z >= 0; z--) {
 			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
 			asm volatile("pcmpgtb %xmm4,%xmm5");
 			asm volatile("pcmpgtb %xmm6,%xmm7");
@@ -177,7 +177,7 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
 			asm volatile("pxor %xmm5,%xmm4");
 			asm volatile("pxor %xmm7,%xmm6");
 			asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
-			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
+			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d + 16]));
 			asm volatile("pxor %xmm5,%xmm2");
 			asm volatile("pxor %xmm7,%xmm3");
 			asm volatile("pxor %xmm5,%xmm4");
@@ -203,22 +203,22 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
 	int d, z, z0;
 
 	z0 = stop;		/* P/Q right side optimization */
-	p = dptr[disks-2];	/* XOR parity */
-	q = dptr[disks-1];	/* RS syndrome */
+	p = dptr[disks - 2];	/* XOR parity */
+	q = dptr[disks - 1];	/* RS syndrome */
 
 	kernel_fpu_begin();
 
 	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
 
-	for ( d = 0 ; d < bytes ; d += 32 ) {
+	for (d = 0; d < bytes; d += 32) {
 		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
-		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d + 16]));
 		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
-		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d + 16]));
 		asm volatile("pxor %xmm4,%xmm2");
 		asm volatile("pxor %xmm6,%xmm3");
 		/* P/Q data pages */
-		for ( z = z0-1 ; z >= start ; z-- ) {
+		for (z = z0 - 1; z >= start; z--) {
 			asm volatile("pxor %xmm5,%xmm5");
 			asm volatile("pxor %xmm7,%xmm7");
 			asm volatile("pcmpgtb %xmm4,%xmm5");
@@ -230,14 +230,14 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
 			asm volatile("pxor %xmm5,%xmm4");
 			asm volatile("pxor %xmm7,%xmm6");
 			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
-			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d + 16]));
 			asm volatile("pxor %xmm5,%xmm2");
 			asm volatile("pxor %xmm7,%xmm3");
 			asm volatile("pxor %xmm5,%xmm4");
 			asm volatile("pxor %xmm7,%xmm6");
 		}
 		/* P/Q left side optimization */
-		for ( z = start-1 ; z >= 0 ; z-- ) {
+		for (z = start - 1; z >= 0; z--) {
 			asm volatile("pxor %xmm5,%xmm5");
 			asm volatile("pxor %xmm7,%xmm7");
 			asm volatile("pcmpgtb %xmm4,%xmm5");
@@ -250,12 +250,12 @@ static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
 			asm volatile("pxor %xmm7,%xmm6");
 		}
 		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
-		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+		asm volatile("pxor %0,%%xmm6" : : "m" (q[d + 16]));
 		/* Don't use movntdq for r/w memory area < cache line */
 		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
-		asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
+		asm volatile("movdqa %%xmm6,%0" : "=m" (q[d + 16]));
 		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
-		asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
+		asm volatile("movdqa %%xmm3,%0" : "=m" (p[d + 16]));
 	}
 
 	asm volatile("sfence" : : : "memory");
@@ -282,8 +282,8 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	int d, z, z0;
 
 	z0 = disks - 3;		/* Highest data disk */
-	p = dptr[z0+1];		/* XOR parity */
-	q = dptr[z0+2];		/* RS syndrome */
+	p = dptr[z0 + 1];	/* XOR parity */
+	q = dptr[z0 + 2];	/* RS syndrome */
 
 	kernel_fpu_begin();
 
@@ -301,11 +301,11 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	asm volatile("pxor %xmm14,%xmm14"); 	/* Q[3] */
 	asm volatile("pxor %xmm15,%xmm15"); 	/* Zero temp */
 
-	for ( d = 0 ; d < bytes ; d += 64 ) {
-		for ( z = z0 ; z >= 0 ; z-- ) {
+	for (d = 0; d < bytes; d += 64) {
+		for (z = z0; z >= 0; z--) {
 			/* The second prefetch seems to improve performance... */
 			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
-			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d + 32]));
 			asm volatile("pcmpgtb %xmm4,%xmm5");
 			asm volatile("pcmpgtb %xmm6,%xmm7");
 			asm volatile("pcmpgtb %xmm12,%xmm13");
@@ -323,9 +323,9 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
 			asm volatile("pxor %xmm13,%xmm12");
 			asm volatile("pxor %xmm15,%xmm14");
 			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
-			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
-			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
-			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d + 16]));
+			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d + 32]));
+			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d + 48]));
 			asm volatile("pxor %xmm5,%xmm2");
 			asm volatile("pxor %xmm7,%xmm3");
 			asm volatile("pxor %xmm13,%xmm10");
@@ -341,11 +341,11 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
 		}
 		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
 		asm volatile("pxor %xmm2,%xmm2");
-		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
+		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d + 16]));
 		asm volatile("pxor %xmm3,%xmm3");
-		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
+		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d + 32]));
 		asm volatile("pxor %xmm10,%xmm10");
-		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d + 48]));
 		asm volatile("pxor %xmm11,%xmm11");
 		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
 		asm volatile("pxor %xmm4,%xmm4");
@@ -369,8 +369,8 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
 	int d, z, z0;
 
 	z0 = stop;		/* P/Q right side optimization */
-	p = dptr[disks-2];	/* XOR parity */
-	q = dptr[disks-1];	/* RS syndrome */
+	p = dptr[disks - 2];	/* XOR parity */
+	q = dptr[disks - 1];	/* RS syndrome */
 
 	kernel_fpu_begin();
 
@@ -378,21 +378,21 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
 
 	for ( d = 0 ; d < bytes ; d += 64 ) {
 		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
-		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
-		asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
-		asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
+		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d + 16]));
+		asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d + 32]));
+		asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d + 48]));
 		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
-		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
-		asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
-		asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
+		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d + 16]));
+		asm volatile("movdqa %0,%%xmm10" : : "m" (p[d + 32]));
+		asm volatile("movdqa %0,%%xmm11" : : "m" (p[d + 48]));
 		asm volatile("pxor %xmm4,%xmm2");
 		asm volatile("pxor %xmm6,%xmm3");
 		asm volatile("pxor %xmm12,%xmm10");
 		asm volatile("pxor %xmm14,%xmm11");
 		/* P/Q data pages */
-		for ( z = z0-1 ; z >= start ; z-- ) {
+		for (z = z0 - 1; z >= start; z--) {
 			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
-			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d + 32]));
 			asm volatile("pxor %xmm5,%xmm5");
 			asm volatile("pxor %xmm7,%xmm7");
 			asm volatile("pxor %xmm13,%xmm13");
@@ -414,9 +414,9 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
 			asm volatile("pxor %xmm13,%xmm12");
 			asm volatile("pxor %xmm15,%xmm14");
 			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
-			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
-			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
-			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d + 16]));
+			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d + 32]));
+			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d + 48]));
 			asm volatile("pxor %xmm5,%xmm2");
 			asm volatile("pxor %xmm7,%xmm3");
 			asm volatile("pxor %xmm13,%xmm10");
@@ -427,7 +427,7 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
 			asm volatile("pxor %xmm15,%xmm14");
 		}
 		asm volatile("prefetchnta %0" :: "m" (q[d]));
-		asm volatile("prefetchnta %0" :: "m" (q[d+32]));
+		asm volatile("prefetchnta %0" :: "m" (q[d + 32]));
 		/* P/Q left side optimization */
 		for ( z = start-1 ; z >= 0 ; z-- ) {
 			asm volatile("pxor %xmm5,%xmm5");
@@ -452,17 +452,17 @@ static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
 			asm volatile("pxor %xmm15,%xmm14");
 		}
 		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
-		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
-		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
-		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d + 16]));
+		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d + 32]));
+		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d + 48]));
 		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
-		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
-		asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
-		asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
+		asm volatile("pxor %0,%%xmm6" : : "m" (q[d + 16]));
+		asm volatile("pxor %0,%%xmm12" : : "m" (q[d + 32]));
+		asm volatile("pxor %0,%%xmm14" : : "m" (q[d + 48]));
 		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
-		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
-		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
-		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
+		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d + 16]));
+		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d + 32]));
+		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d + 48]));
 	}
 	asm volatile("sfence" : : : "memory");
 	kernel_fpu_end();
-- 
2.34.1