/*
 * VMAC: Message Authentication Code using Universal Hashing
 *
 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
 *
 * Copyright (c) 2009, Intel Corporation.
 * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Derived from:
 *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 *	This implementation is hereby placed in the public domain.
 *	The authors offer no warranty. Use at your own risk.
 *	Last modified: 17 APR 08, 1700 PDT
 */

#include <linux/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>

/*
 * User definable settings.
 */
#define VMAC_TAG_LEN	64
#define VMAC_KEY_SIZE	128	/* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES	128	/* Must be 2^i for some 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES	16

/* per-transform (per-key) context */
struct vmac_tfm_ctx {
	struct crypto_cipher *cipher;
	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
	u64 polykey[2*VMAC_TAG_LEN/64];
	u64 l3key[2*VMAC_TAG_LEN/64];
};

/* per-request context */
struct vmac_desc_ctx {
	union {
		u8 partial[VMAC_NHBYTES];	/* partial block */
		__le64 partial_words[VMAC_NHBYTES / 8];
	};
	unsigned int partial_size;	/* size of the partial block */
	bool first_block_processed;
	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
	union {
		u8 bytes[VMAC_NONCEBYTES];
		__be64 pads[VMAC_NONCEBYTES / 8];
	} nonce;
	unsigned int nonce_size;	/* nonce bytes filled so far */
};

/*
 * Constants and masks
 */
#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime  */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask       */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask       */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask       */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask     */

#define pe64_to_cpup le64_to_cpup	/* Prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)
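/*
 * Worked example (illustrative only; the operands below are arbitrary):
 * MUL64() is the schoolbook decomposition of a 64x64->128-bit multiply
 * into four 32x32->64-bit products.  Writing i1 = a*2^32 + b and
 * i2 = c*2^32 + d:
 *
 *	i1*i2 = (a*c)*2^64 + (a*d + b*c)*2^32 + b*d
 *
 * so (rh,rl) is seeded with a*c and b*d, and each cross product is
 * folded in at a 32-bit offset via ADD128.  For instance:
 *
 *	u64 hi, lo;
 *	MUL64(hi, lo, 0x100000000ULL, 0x100000000ULL);
 *
 * yields hi == 1 and lo == 0, since 2^32 * 2^32 == 2^64.
 */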
/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times: once for
 * 64-bit architectures and once for the rest (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
			      pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
			      pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
			      pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64 bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
			      pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
			      pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
			      pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
			      pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
			      pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
			      pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
			      pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
			      pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
			      pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
			      pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
			      pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
			      pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#endif

#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)
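/*
 * A sketch of the reduction above (reasoning only, not normative): the
 * accumulator and key are kept below 2^127 (the key is masked with
 * mpoly), so the up-to-254-bit product a*k splits as X*2^128 + Y.
 * Since 2^128 == 2 (mod 2^127 - 1), the high half folds back in as
 *
 *	a*k == 2*X + Y (mod 2^127 - 1)
 *
 * which is why the kh partial product is computed as ah*(2*kh) and why
 * t2h is doubled before being added back into (ah,al).
 */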
#else /* !CONFIG_64BIT */

#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		u64 t1, t2, m1, m2, t;					\
		int i;							\
		rh = rl = t = 0;					\
		for (i = 0; i < nw; i += 2) {				\
			t1 = pe64_to_cpup(mp+i) + kp[i];		\
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
			m2 = MUL32(t1 >> 32, t2);			\
			m1 = MUL32(t1, t2 >> 32);			\
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
			       MUL32(t1, t2));				\
			rh += (u64)(u32)(m1 >> 32)			\
				+ (u32)(m2 >> 32);			\
			t += (u64)(u32)m1 + (u32)m2;			\
		}							\
		ADD128(rh, rl, (t >> 32), (t << 32));			\
	} while (0)
#endif

static void poly_step_func(u64 *ahi, u64 *alo,
			   const u64 *kh, const u64 *kl,
			   const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q =  MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q =  MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif	/* end of specialized NH and poly definitions */
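/*
 * Overview (summarizing the VMAC draft referenced at the top of this
 * file): VHASH is the three-layer composition
 *
 *	VHASH(M) = L3(L2(L1(M)))
 *
 * where L1 is the NH hash over VMAC_NHBYTES-byte blocks (the nh_*
 * macros above), L2 is the polynomial hash over GF(2^127 - 1)
 * (poly_step), and L3 is the final hash modulo the prime 2^64 - 257
 * (l3hash below).
 */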
/* At least nh_16 is defined. Define the others as needed here. */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do {								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif

static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127 + (len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;
	return rl;
}

/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
			 struct vmac_desc_ctx *dctx,
			 const __le64 *mptr, unsigned int blocks)
{
	const u64 *kptr = tctx->nhkey;
	const u64 pkh = tctx->polykey[0];
	const u64 pkl = tctx->polykey[1];
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];
	u64 rh, rl;

	if (!dctx->first_block_processed) {
		dctx->first_block_processed = true;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		blocks--;
	}

	while (blocks--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	dctx->polytmp[0] = ch;
	dctx->polytmp[1] = cl;
}

static int vmac_setkey(struct crypto_shash *tfm,
		       const u8 *key, unsigned int keylen)
{
	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	__be64 out[2];
	u8 in[16] = { 0 };
	unsigned int i;
	int err;

	if (keylen != VMAC_KEY_LEN)
		return -EINVAL;

	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
	if (err)
		return err;

	/* Fill nh key */
	in[0] = 0x80;
	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->nhkey[i] = be64_to_cpu(out[0]);
		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
		in[15]++;
	}

	/* Fill poly key */
	in[0] = 0xC0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
		in[15]++;
	}

	/* Fill ip key */
	in[0] = 0xE0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
		do {
			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
			tctx->l3key[i] = be64_to_cpu(out[0]);
			tctx->l3key[i+1] = be64_to_cpu(out[1]);
			in[15]++;
		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
	}

	return 0;
}

static int vmac_init(struct shash_desc *desc)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->partial_size = 0;
	dctx->first_block_processed = false;
	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
	dctx->nonce_size = 0;
	return 0;
}
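/*
 * Usage sketch (hypothetical caller; error handling elided): this shash
 * expects the 128-bit nonce to be passed as the first VMAC_NONCEBYTES of
 * the data stream, before the message itself:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	crypto_shash_setkey(tfm, key, VMAC_KEY_LEN);
 *	desc->tfm = tfm;
 *	crypto_shash_init(desc);
 *	crypto_shash_update(desc, nonce, VMAC_NONCEBYTES); (top bit must be 0)
 *	crypto_shash_update(desc, msg, msg_len);
 *	crypto_shash_final(desc, tag); (writes the 8-byte tag)
 *	crypto_free_shash(tfm);
 */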
static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	/* Nonce is passed as first VMAC_NONCEBYTES of data. */
	if (dctx->nonce_size < VMAC_NONCEBYTES) {
		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
		dctx->nonce_size += n;
		p += n;
		len -= n;
	}

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}

static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
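/*
 * Tag generation, as implemented by vmac_final() below: per the draft,
 *
 *	Tag = (VHASH(M) + pad) mod 2^64
 *
 * where pad is one 64-bit half of the encrypted nonce block, selected by
 * the nonce's lowest bit (which is cleared before encryption, so both
 * halves come from the same cipher invocation).
 */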
static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	int index;
	u64 hash, pad;

	if (dctx->nonce_size != VMAC_NONCEBYTES)
		return -EINVAL;

	/*
	 * The VMAC specification requires a nonce at least 1 bit shorter than
	 * the block cipher's block length, so we actually only accept a
	 * 127-bit nonce.  We define the unused bit to be the first one and
	 * require that it be 0, so the needed prepending of a 0 bit is
	 * implicit.
	 */
	if (dctx->nonce.bytes[0] & 0x80)
		return -EINVAL;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
				  dctx->nonce.bytes);
	pad = be64_to_cpu(dctx->nonce.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_be64(hash + pad, out);
	return 0;
}

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_cipher_alg(spawn);

	err = -EINVAL;
	if (alg->cra_blocksize != VMAC_NONCEBYTES)
		goto err_free_inst;

	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;

	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
	inst->alg.digestsize = VMAC_TAG_LEN / 8;
	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	inst->free = shash_free_singlespawn_instance;

	err = shash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;
}

static struct crypto_template vmac64_tmpl = {
	.name = "vmac64",
	.create = vmac_create,
	.module = THIS_MODULE,
};

static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac64_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac64_tmpl);
}

subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);