TOMOYO Linux Cross Reference
Linux/arch/mips/kernel/cmpxchg.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
        u32 old32, new32, load32, mask;
        volatile u32 *ptr32;
        unsigned int shift;

        /* Check that ptr is naturally aligned */
        WARN_ON((unsigned long)ptr & (size - 1));

        /* Mask value to the correct size. */
        mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
        val &= mask;

        /*
         * Calculate a shift & mask that correspond to the value we wish to
         * exchange within the naturally aligned 4 byte integer that includes
         * it.
         */
        shift = (unsigned long)ptr & 0x3;
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                shift ^= sizeof(u32) - size;
        shift *= BITS_PER_BYTE;
        mask <<= shift;

        /*
         * Calculate a pointer to the naturally aligned 4 byte integer that
         * includes our byte of interest, and load its value.
         */
        ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
        load32 = *ptr32;

        /*
         * Splice the new value into the containing word and attempt to
         * store it with a 32-bit cmpxchg, retrying if another CPU changed
         * the word between our load and the cmpxchg.
         */
        do {
                old32 = load32;
                new32 = (load32 & ~mask) | (val << shift);
                load32 = arch_cmpxchg(ptr32, old32, new32);
        } while (load32 != old32);

        return (load32 & mask) >> shift;
}
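
The big-endian adjustment above (shift ^= sizeof(u32) - size) is the subtle
step: on a little-endian CPU the byte at offset N within an aligned word sits
at bit N * 8, while on a big-endian CPU a single byte at offset N sits at bit
(3 - N) * 8, and the XOR produces exactly that flip. The following standalone
userspace sketch (an illustration only, not kernel code; GENMASK and
BITS_PER_BYTE are redefined locally to mimic the kernel macros) prints the
shift and mask that the derivation yields for each offset:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8
#define GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

/* Mirrors the kernel's shift derivation for a sub-word value. */
static unsigned int calc_shift(uintptr_t ptr, unsigned int size, int big_endian)
{
        unsigned int shift = ptr & 0x3;

        if (big_endian)
                shift ^= sizeof(uint32_t) - size;
        return shift * BITS_PER_BYTE;
}

int main(void)
{
        unsigned int size = 1; /* exchanging a single byte */
        uint32_t mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
        unsigned int off;

        for (off = 0; off < 4; off++) {
                unsigned int le = calc_shift(off, size, 0);
                unsigned int be = calc_shift(off, size, 1);

                printf("offset %u: LE shift %2u mask %08x, BE shift %2u mask %08x\n",
                       off, le, mask << le, be, mask << be);
        }
        return 0;
}

For size == 2 the same XOR maps offsets 0 and 2 to big-endian shifts of 16
and 0, so in every case the naturally aligned value stays within the single
32-bit word that ptr32 points at.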

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
                              unsigned long new, unsigned int size)
{
        u32 mask, old32, new32, load32, load;
        volatile u32 *ptr32;
        unsigned int shift;

        /* Check that ptr is naturally aligned */
        WARN_ON((unsigned long)ptr & (size - 1));

        /* Mask inputs to the correct size. */
        mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
        old &= mask;
        new &= mask;

        /*
         * Calculate a shift & mask that correspond to the value we wish to
         * compare & exchange within the naturally aligned 4 byte integer
         * that includes it.
         */
        shift = (unsigned long)ptr & 0x3;
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                shift ^= sizeof(u32) - size;
        shift *= BITS_PER_BYTE;
        mask <<= shift;

        /*
         * Calculate a pointer to the naturally aligned 4 byte integer that
         * includes our byte of interest, and load its value.
         */
        ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
        load32 = *ptr32;

        while (true) {
                /*
                 * Ensure the byte we want to exchange matches the expected
                 * old value, and if not then bail.
                 */
                load = (load32 & mask) >> shift;
                if (load != old)
                        return load;

                /*
                 * Calculate the old & new values of the naturally aligned
                 * 4 byte integer that include the byte we want to exchange.
                 * Attempt to exchange the old value for the new value, and
                 * return if we succeed.
                 */
                old32 = (load32 & ~mask) | (old << shift);
                new32 = (load32 & ~mask) | (new << shift);
                load32 = arch_cmpxchg(ptr32, old32, new32);
                if (load32 == old32)
                        return old;
        }
}

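These two helpers back the 1- and 2-byte cases of the xchg() and cmpxchg()
macros in the MIPS asm/cmpxchg.h, since the LL/SC instructions underlying
arch_cmpxchg() only operate on 32-bit (or 64-bit) quantities. As a rough
userspace analogue of __cmpxchg_small(), here is a sketch assuming GCC/Clang
__atomic builtins in place of arch_cmpxchg() and a little-endian host;
cmpxchg_u8() is a hypothetical name, not a kernel API:

#include <stdio.h>
#include <stdint.h>

static uint8_t cmpxchg_u8(uint8_t *ptr, uint8_t expected, uint8_t desired)
{
        uint32_t *ptr32 = (uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
        unsigned int shift = ((uintptr_t)ptr & 0x3) * 8; /* little-endian only */
        uint32_t mask = 0xffu << shift;
        uint32_t load32 = __atomic_load_n(ptr32, __ATOMIC_RELAXED);

        for (;;) {
                uint8_t load = (load32 & mask) >> shift;
                if (load != expected)
                        return load; /* comparison failed: report what we saw */

                uint32_t old32 = (load32 & ~mask) | ((uint32_t)expected << shift);
                uint32_t new32 = (load32 & ~mask) | ((uint32_t)desired << shift);

                /* On failure the builtin writes the current word into old32. */
                if (__atomic_compare_exchange_n(ptr32, &old32, new32, 0,
                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                        return expected;
                load32 = old32; /* a neighbouring byte changed: retry */
        }
}

int main(void)
{
        uint32_t word = 0x44332211; /* bytes 11 22 33 44 on little-endian */
        uint8_t *bytes = (uint8_t *)&word;

        uint8_t prev = cmpxchg_u8(&bytes[1], 0x22, 0xaa);
        printf("prev=%02x word=%08x\n", prev, word);
        /* Expected on a little-endian host: prev=22 word=4433aa11 */
        return 0;
}

Note the same design choice as the kernel code: if the 32-bit cmpxchg fails
only because a neighbouring byte changed, the loop retries rather than
reporting a spurious failure, so the caller only ever sees a mismatch on the
byte it actually asked about.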
