/*
 * Copyright 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.camera2.cts.rs;

import android.graphics.Bitmap;
import android.hardware.camera2.CameraCharacteristics;
import android.hardware.camera2.CameraMetadata;
import android.hardware.camera2.CaptureResult;
import android.hardware.camera2.params.ColorSpaceTransform;
import android.hardware.camera2.params.LensShadingMap;
import android.util.Log;
import android.util.Rational;
import android.util.SparseIntArray;

import java.util.Arrays;

/**
 * Utility class providing methods for rendering RAW16 images into other colorspaces.
 */
public class RawConverter {
    private static final String TAG = "RawConverter";
    private static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);

    /**
     * Matrix to convert from CIE XYZ colorspace to sRGB, Bradford-adapted to D65.
     */
    private static final float[] sXYZtoRGBBradford = new float[] {
            3.1338561f, -1.6168667f, -0.4906146f,
            -0.9787684f, 1.9161415f, 0.0334540f,
            0.0719453f, -0.2289914f, 1.4052427f
    };

    /**
     * Matrix to convert from the ProPhoto RGB colorspace to CIE XYZ colorspace.
     */
    private static final float[] sProPhotoToXYZ = new float[] {
            0.797779f, 0.135213f, 0.031303f,
            0.288000f, 0.711900f, 0.000100f,
            0.000000f, 0.000000f, 0.825105f
    };

    /**
     * Matrix to convert from CIE XYZ colorspace to ProPhoto RGB colorspace.
     */
    private static final float[] sXYZtoProPhoto = new float[] {
            1.345753f, -0.255603f, -0.051025f,
            -0.544426f, 1.508096f, 0.020472f,
            0.000000f, 0.000000f, 1.211968f
    };

    /**
     * Coefficients for a 3rd order polynomial, ordered from highest to lowest power.  This
     * polynomial approximates the default tonemapping curve used for ACR3.
     */
    private static final float[] DEFAULT_ACR3_TONEMAP_CURVE_COEFFS = new float[] {
            -0.7836f, 0.8469f, 0.943f, 0.0209f
    };

    /**
     * The D50 whitepoint coordinates in CIE XYZ colorspace.
     */
    private static final float[] D50_XYZ = new float[] { 0.9642f, 1, 0.8249f };

    /**
     * An array containing the color temperatures for standard reference illuminants.
     */
    private static final SparseIntArray sStandardIlluminants = new SparseIntArray();
    private static final int NO_ILLUMINANT = -1;
    static {
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER,
                6504);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT, 6504);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_D65, 6504);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER, 5003);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_D50, 5003);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_FLASH, 5503);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_D55, 5503);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_SHADE, 7504);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_D75, 7504);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN,
                2856);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN, 2856);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A, 2856);
        sStandardIlluminants.append(
                CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT, 4874);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_STANDARD_B, 4874);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_STANDARD_C, 6774);
        sStandardIlluminants.append(
                CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT, 6430);
        sStandardIlluminants.append(
                CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT, 4230);
        sStandardIlluminants.append(
                CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT, 3450);
        sStandardIlluminants.append(CameraMetadata.SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT, 2940);
    }

    /**
     * Utility class wrapping Bayer specific DNG metadata.
     */
    static class DngBayerMetadata {
        public final int referenceIlluminant1;
        public final int referenceIlluminant2;
        public final float[] calibrationTransform1;
        public final float[] calibrationTransform2;
        public final float[] colorMatrix1;
        public final float[] colorMatrix2;
        public final float[] forwardTransform1;
        public final float[] forwardTransform2;
        public final Rational[/*3*/] neutralColorPoint;

        /**
         * Convert a 3x3 {@link ColorSpaceTransform} to a matrix and write the matrix into the
         * output.
         *
         * @param xform a {@link ColorSpaceTransform} to transform.
         * @param output the 3x3 matrix to overwrite.
         */
        private static void convertColorspaceTransform(ColorSpaceTransform xform,
                /*out*/float[] output) {
            for (int i = 0; i < 3; i++) {
                for (int j = 0; j < 3; j++) {
                    output[i * 3 + j] = xform.getElement(j, i).floatValue();
                }
            }
        }

        /**
         * Constructor to parse static and dynamic metadata into DNG metadata.
         */
        public DngBayerMetadata(CameraCharacteristics staticMetadata,
                CaptureResult dynamicMetadata) {
            referenceIlluminant1 =
                    staticMetadata.get(CameraCharacteristics.SENSOR_REFERENCE_ILLUMINANT1);
            if (staticMetadata.get(CameraCharacteristics.SENSOR_REFERENCE_ILLUMINANT2) != null) {
                referenceIlluminant2 =
                        staticMetadata.get(CameraCharacteristics.SENSOR_REFERENCE_ILLUMINANT2);
            } else {
                referenceIlluminant2 = referenceIlluminant1;
            }
            calibrationTransform1 = new float[9];
            calibrationTransform2 = new float[9];
            convertColorspaceTransform(
                    staticMetadata.get(CameraCharacteristics.SENSOR_CALIBRATION_TRANSFORM1),
                    calibrationTransform1);
            if (staticMetadata.get(CameraCharacteristics.SENSOR_CALIBRATION_TRANSFORM2) != null) {
                convertColorspaceTransform(
                        staticMetadata.get(CameraCharacteristics.SENSOR_CALIBRATION_TRANSFORM2),
                        calibrationTransform2);
            } else {
                convertColorspaceTransform(
                        staticMetadata.get(CameraCharacteristics.SENSOR_CALIBRATION_TRANSFORM1),
                        calibrationTransform2);
            }
            colorMatrix1 = new float[9];
            colorMatrix2 = new float[9];
            convertColorspaceTransform(
                    staticMetadata.get(CameraCharacteristics.SENSOR_COLOR_TRANSFORM1),
                    colorMatrix1);
            if (staticMetadata.get(CameraCharacteristics.SENSOR_COLOR_TRANSFORM2) != null) {
                convertColorspaceTransform(
                        staticMetadata.get(CameraCharacteristics.SENSOR_COLOR_TRANSFORM2),
                        colorMatrix2);
            } else {
                convertColorspaceTransform(
                        staticMetadata.get(CameraCharacteristics.SENSOR_COLOR_TRANSFORM1),
                        colorMatrix2);
            }
            forwardTransform1 = new float[9];
            forwardTransform2 = new float[9];
            convertColorspaceTransform(
                    staticMetadata.get(CameraCharacteristics.SENSOR_FORWARD_MATRIX1),
                    forwardTransform1);
            if (staticMetadata.get(CameraCharacteristics.SENSOR_FORWARD_MATRIX2) != null) {
                convertColorspaceTransform(
                        staticMetadata.get(CameraCharacteristics.SENSOR_FORWARD_MATRIX2),
                        forwardTransform2);
            } else {
                convertColorspaceTransform(
                        staticMetadata.get(CameraCharacteristics.SENSOR_FORWARD_MATRIX1),
                        forwardTransform2);
            }

            neutralColorPoint = dynamicMetadata.get(CaptureResult.SENSOR_NEUTRAL_COLOR_POINT);
        }
    }

    // Port of RAW16 converter from renderscript to Java.
    // Comments copied verbatim from raw_converter.rscript

    // This file includes a conversion kernel for RGGB, GRBG, GBRG, and BGGR Bayer patterns.
    // Applying this script also will apply black-level subtraction, rescaling, clipping,
    // tonemapping, and color space transforms along with the Bayer demosaic.
    // See RawConverter.java for more information.
    static class ConverterKernel {

        // RAW16 buffer of dimensions (raw image stride) * (raw image height)
        byte[] mInput;

        // Whitelevel of sensor
        int mWhiteLevel;

        // X offset into inputRawBuffer
        int mOffsetX;

        // Y offset into inputRawBuffer
        int mOffsetY;

        // Width of raw buffer
        int mInputWidth;

        // Height of raw buffer
        int mInputHeight;

        // Stride of raw buffer
        int mInputStride;

        // Coefficients for a polynomial tonemapping curve
        float[/*4*/] mToneMapCoeffs;

        // Does gainmap exist?
        boolean mHasGainMap;

        // Gainmap to apply to linearized raw sensor data.
        float[] mGainMap;

        // The width of the gain map
        int mGainMapWidth;

        // The height of the gain map
        int mGainMapHeight;

        // Is monochrome camera?
        boolean mIsMonochrome;

        // Color transform from sensor to a wide-gamut colorspace
        float[/*9*/] mSensorToIntermediate;

        // Color transform from wide-gamut colorspace to sRGB
        float[/*9*/] mIntermediateToSRGB;

        // The camera neutral
        float[/*3*/] mNeutralPoint;

        // The Color Filter Arrangement pattern used
        int mCfaPattern;

        // Blacklevel to subtract for each channel, given in CFA order
        int[/*4*/] mBlackLevel;

        ConverterKernel() { }

        void set_inputRawBuffer(byte[] input) {
            mInput = input;
        }

        void set_whiteLevel(int whiteLevel) {
            mWhiteLevel = whiteLevel;
        }

        void set_offsetX(int offsetX) {
            mOffsetX = offsetX;
        }

        void set_offsetY(int offsetY) {
            mOffsetY = offsetY;
        }

        void set_rawWidth(int inputWidth) {
            mInputWidth = inputWidth;
        }

        void set_rawHeight(int inputHeight) {
            mInputHeight = inputHeight;
        }

        void set_rawStride(int inputStride) {
            mInputStride = inputStride;
        }

        void set_toneMapCoeffs(float[/*4*/] toneMapCoeffs) {
            mToneMapCoeffs = toneMapCoeffs;
        }

        void set_hasGainMap(boolean hasGainMap) {
            mHasGainMap = hasGainMap;
        }

        void set_gainMapWidth(int gainMapWidth) {
            mGainMapWidth = gainMapWidth;
        }

        void set_gainMapHeight(int gainMapHeight) {
            mGainMapHeight = gainMapHeight;
        }

        void set_gainMap(float[] gainMap) {
            if (gainMap.length != mGainMapWidth * mGainMapHeight * 4) {
                throw new IllegalArgumentException("Invalid float array of length "
                        + gainMap.length + ", must be correct size for gainMap of dimensions "
                        + mGainMapWidth + "x" + mGainMapHeight);
            }
            mGainMap = gainMap;
        }

        void set_isMonochrome(boolean isMonochrome) {
            mIsMonochrome = isMonochrome;
        }

        void set_sensorToIntermediate(float[/*9*/] sensorToIntermediate) {
            mSensorToIntermediate = sensorToIntermediate;
        }

        void set_intermediateToSRGB(float[/*9*/] intermediateToSRGB) {
            mIntermediateToSRGB = intermediateToSRGB;
        }

        void set_neutralPoint(float[/*3*/] neutralPoint) {
            mNeutralPoint = neutralPoint;
        }

        void set_cfaPattern(int cfaPattern) {
            mCfaPattern = cfaPattern;
        }

        void set_blackLevelPattern(int[/*4*/] blackLevelPattern) {
            mBlackLevel = blackLevelPattern;
        }

        private float getGain(int x, int y, int d) {
            return mGainMap[y * mGainMapWidth * 4 + x * 4 + d];
        }

        // Interpolate gain map to find per-channel gains at a given pixel
        private float[/*4*/] getGain(int x, int y) {
            float interpX = (((float) x) / mInputWidth) * mGainMapWidth;
            float interpY = (((float) y) / mInputHeight) * mGainMapHeight;
            int gX = (int) interpX;
            int gY = (int) interpY;
            int gXNext = (gX + 1 < mGainMapWidth) ? gX + 1 : gX;
            int gYNext = (gY + 1 < mGainMapHeight) ? gY + 1 : gY;

            float fracX = interpX - (float) gX;
            float fracY = interpY - (float) gY;
            float invFracX = 1.f - fracX;
            float invFracY = 1.f - fracY;

            float[/*4*/] gain = new float[4];

            for (int d = 0; d < 4; d++) {
                float tl = getGain(gX, gY, d);
                float tr = getGain(gXNext, gY, d);
                float bl = getGain(gX, gYNext, d);
                float br = getGain(gXNext, gYNext, d);

                gain[d] = tl * invFracX * invFracY
                        + tr * fracX * invFracY
                        + bl * invFracX * fracY
                        + br * fracX * fracY;
            }

            return gain;
        }

        // Apply gamma correction using sRGB gamma curve
        static float gammaEncode(float x) {
            return x <= 0.0031308f ? x * 12.92f : 1.055f * (float) Math.pow(x, 0.4166667f) - 0.055f;
        }

        // Apply gamma correction to each color channel in RGB pixel
        static float[/*3*/] gammaCorrectPixel(float[/*3*/] rgb) {
            rgb[0] = gammaEncode(rgb[0]);
            rgb[1] = gammaEncode(rgb[1]);
            rgb[2] = gammaEncode(rgb[2]);
            return rgb;
        }

        static float clamp(float v, float l, float u) {
            return (float) Math.min(Math.max(l, v), u);
        }

        static float[/*3*/] matrixMultiply(float[/*9*/] m, float[/*3*/] v) {
            float x = m[0] * v[0] + m[1] * v[1] + m[2] * v[2];
            float y = m[3] * v[0] + m[4] * v[1] + m[5] * v[2];
            float z = m[6] * v[0] + m[7] * v[1] + m[8] * v[2];
            v[0] = x; v[1] = y; v[2] = z;
            return v;
        }

        // Apply a colorspace transform to the intermediate colorspace, apply
        // a tonemapping curve, apply a colorspace transform to a final colorspace,
        // and apply a gamma correction curve.
        private float[/*3*/] applyColorspace(float[/*3*/] pRGB) {
            pRGB[0] = clamp(pRGB[0], 0.f, mNeutralPoint[0]);
            pRGB[1] = clamp(pRGB[1], 0.f, mNeutralPoint[1]);
            pRGB[2] = clamp(pRGB[2], 0.f, mNeutralPoint[2]);

            matrixMultiply(mSensorToIntermediate, pRGB);
            tonemap(pRGB);
            matrixMultiply(mIntermediateToSRGB, pRGB);

            pRGB[0] = clamp(pRGB[0], 0.f, 1.f);
            pRGB[1] = clamp(pRGB[1], 0.f, 1.f);
            pRGB[2] = clamp(pRGB[2], 0.f, 1.f);

            return gammaCorrectPixel(pRGB);
        }

        // Apply polynomial tonemapping curve to each color channel in RGB pixel.
        // This attempts to apply tonemapping without changing the hue of each pixel,
        // i.e.:
        //
        // For some RGB values:
        // M = max(R, G, B)
        // m = min(R, G, B)
        // m' = mid(R, G, B)
        // chroma = M - m
        // H = (m' - m) / chroma
        //
        // The relationship H=H' should be preserved, where H and H' are calculated from
        // the RGB and RGB' value at this pixel before and after this tonemapping
        // operation has been applied, respectively.
        private float[/*3*/] tonemap(float[/*3*/] rgb) {
            rgb[0] = clamp(rgb[0], 0.f, 1.f);
            rgb[1] = clamp(rgb[1], 0.f, 1.f);
            rgb[2] = clamp(rgb[2], 0.f, 1.f);

            float tmp;
            int permutation = 0;

            // Sort the RGB channels by value
            if (rgb[2] < rgb[1]) {
                tmp = rgb[2];
                rgb[2] = rgb[1];
                rgb[1] = tmp;
                permutation |= 1;
            }
            if (rgb[1] < rgb[0]) {
                tmp = rgb[1];
                rgb[1] = rgb[0];
                rgb[0] = tmp;
                permutation |= 2;
            }
            if (rgb[2] < rgb[1]) {
                tmp = rgb[2];
                rgb[2] = rgb[1];
                rgb[1] = tmp;
                permutation |= 4;
            }

            float min = rgb[0];
            float max = rgb[2];

            // Apply tonemapping curve to min, max RGB channel values
            min = (float) Math.pow(min, 3.f) * mToneMapCoeffs[0]
                    + (float) Math.pow(min, 2.f) * mToneMapCoeffs[1]
                    + (float) /*Math.pow(min, 1.f)*/min * mToneMapCoeffs[2]
                    + (float) /*Math.pow(min, 0.f)*/1.0 * mToneMapCoeffs[3];

            max = (float) Math.pow(max, 3.f) * mToneMapCoeffs[0]
                    + (float) Math.pow(max, 2.f) * mToneMapCoeffs[1]
                    + (float) /*Math.pow(max, 1.f)*/max * mToneMapCoeffs[2]
                    + (float) /*Math.pow(max, 0.f)*/1.0 * mToneMapCoeffs[3];

            // Rescale middle value
            float newMid;
            if (rgb[2] == rgb[0]) {
                newMid = max;
            } else {
                newMid = min + (max - min) * (rgb[1] - rgb[0]) / (rgb[2] - rgb[0]);
            }

            switch (permutation) {
                // b >= g >= r
                case 0 : {
                    rgb[0] = min;
                    rgb[1] = newMid;
                    rgb[2] = max;
                    break;
                }
                // g >= b >= r
                case 1 : {
                    rgb[0] = min;
                    rgb[2] = newMid;
                    rgb[1] = max;
                    break;
                }
                // b >= r >= g
                case 2 : {
                    rgb[1] = min;
                    rgb[0] = newMid;
                    rgb[2] = max;
                    break;
                }
                // g >= r >= b
                case 3 : {
                    rgb[2] = min;
                    rgb[0] = newMid;
                    rgb[1] = max;
                    break;
                }
                // r >= b >= g
                case 6 : {
                    rgb[1] = min;
                    rgb[2] = newMid;
                    rgb[0] = max;
                    break;
                }
                // r >= g >= b
                case 7 : {
                    rgb[2] = min;
                    rgb[1] = newMid;
                    rgb[0] = max;
                    break;
                }
                case 4 : // impossible
                case 5 : // impossible
                default : {
                    rgb[0] = 0.f;
                    rgb[1] = 0.f;
                    rgb[2] = 0.f;
                    throw new IllegalStateException("RawConverter: Logic error in tonemap.");
                }
            }

            rgb[0] = clamp(rgb[0], 0.f, 1.f);
            rgb[1] = clamp(rgb[1], 0.f, 1.f);
            rgb[2] = clamp(rgb[2], 0.f, 1.f);

            return rgb;
        }

        private float getInput(int x, int y) {
            // 16-bit raw pixels (big endian)
            return (Byte.toUnsignedInt(mInput[y * mInputStride + 2 * x + 1]) << 8)
                    + Byte.toUnsignedInt(mInput[y * mInputStride + 2 * x]);
        }

        // Load a 3x3 patch of pixels into the output.
        private void load3x3(int x, int y, /*out*/float[/*9*/] outputArray) {
            outputArray[0] = getInput(x - 1, y - 1);
            outputArray[1] = getInput(x, y - 1);
            outputArray[2] = getInput(x + 1, y - 1);
            outputArray[3] = getInput(x - 1, y);
            outputArray[4] = getInput(x, y);
            outputArray[5] = getInput(x + 1, y);
            outputArray[6] = getInput(x - 1, y + 1);
            outputArray[7] = getInput(x, y + 1);
            outputArray[8] = getInput(x + 1, y + 1);
        }

        // Blacklevel subtract, and normalize each pixel in the outputArray, and apply the
        // gain map.
        void linearizeAndGainmap(int x, int y, /*inout*/float[/*9*/] outputArray) {
            int kk = 0;
            for (int j = y - 1; j <= y + 1; j++) {
                for (int i = x - 1; i <= x + 1; i++) {
                    int index = (i & 1) | ((j & 1) << 1);  // bits [0,1] are blacklevel offset
                    index |= (mCfaPattern << 2);  // bits [2,3] are cfa
                    float bl = 0.f;
                    float g = 1.f;
                    float[/*4*/] gains = new float[]{1.f, 1.f, 1.f, 1.f};
                    if (mHasGainMap) {
                        gains = getGain(i, j);
                    }
                    switch (index) {
                        // RGGB
                        case 0 : {
                            bl = mBlackLevel[0];
                            g = gains[0];
                            break;
                        }
                        case 1 : {
                            bl = mBlackLevel[1];
                            g = gains[1];
                            break;
                        }
                        case 2 : {
                            bl = mBlackLevel[2];
                            g = gains[2];
                            break;
                        }
                        case 3 : {
                            bl = mBlackLevel[3];
                            g = gains[3];
                            break;
                        }
                        // GRBG
                        case 4 : {
                            bl = mBlackLevel[0];
                            g = gains[1];
                            break;
                        }
                        case 5 : {
                            bl = mBlackLevel[1];
                            g = gains[0];
                            break;
                        }
                        case 6 : {
                            bl = mBlackLevel[2];
                            g = gains[3];
                            break;
                        }
                        case 7 : {
                            bl = mBlackLevel[3];
                            g = gains[2];
                            break;
                        }
                        // GBRG
                        case 8 : {
                            bl = mBlackLevel[0];
                            g = gains[1];
                            break;
                        }
                        case 9 : {
                            bl = mBlackLevel[1];
                            g = gains[3];
                            break;
                        }
                        case 10 : {
                            bl = mBlackLevel[2];
                            g = gains[0];
                            break;
                        }
                        case 11 : {
                            bl = mBlackLevel[3];
                            g = gains[2];
                            break;
                        }
                        // BGGR
                        case 12 : {
                            bl = mBlackLevel[0];
                            g = gains[3];
                            break;
                        }
                        case 13 : {
                            bl = mBlackLevel[1];
                            g = gains[1];
                            break;
                        }
                        case 14 : {
                            bl = mBlackLevel[2];
                            g = gains[2];
                            break;
                        }
                        case 15 : {
                            bl = mBlackLevel[3];
                            g = gains[0];
                            break;
                        }
                    }
                    outputArray[kk] = clamp(g * (outputArray[kk] - bl) / (mWhiteLevel - bl), 0, 1);
                    kk++;
                }
            }
        }

        // Apply bilinear-interpolation to demosaic
        static float[/*3*/] demosaic(int x, int y, int cfa, float[/*9*/] inputArray) {
            int index = (x & 1) | ((y & 1) << 1);
            index |= (cfa << 2);

            float[/*3*/] pRGB = new float[3];
            switch (index) {
                case 0 :
                case 5 :
                case 10 :
                case 15 : { // Red centered
                    // B G B
                    // G R G
                    // B G B
                    pRGB[0] = inputArray[4];
                    pRGB[1] = (inputArray[1] + inputArray[3] + inputArray[5] + inputArray[7]) / 4;
                    pRGB[2] = (inputArray[0] + inputArray[2] + inputArray[6] + inputArray[8]) / 4;
                    break;
                }
                case 1 :
                case 4 :
                case 11 :
                case 14 : { // Green centered w/ horizontally adjacent Red
                    // G B G
                    // R G R
                    // G B G
                    pRGB[0] = (inputArray[3] + inputArray[5]) / 2;
                    pRGB[1] = inputArray[4];
                    pRGB[2] = (inputArray[1] + inputArray[7]) / 2;
                    break;
                }
                case 2 :
                case 7 :
                case 8 :
                case 13 : { // Green centered w/ horizontally adjacent Blue
                    // G R G
                    // B G B
                    // G R G
                    pRGB[0] = (inputArray[1] + inputArray[7]) / 2;
                    pRGB[1] = inputArray[4];
                    pRGB[2] = (inputArray[3] + inputArray[5]) / 2;
                    break;
                }
                case 3 :
                case 6 :
                case 9 :
                case 12 : { // Blue centered
                    // R G R
                    // G B G
                    // R G R
                    pRGB[0] = (inputArray[0] + inputArray[2] + inputArray[6] + inputArray[8]) / 4;
                    pRGB[1] = (inputArray[1] + inputArray[3] + inputArray[5] + inputArray[7]) / 4;
                    pRGB[2] = inputArray[4];
                    break;
                }
            }

            return pRGB;
        }

        static int packColorTo8888(float[/*3*/] pRGB) {
            int a = 255;
            int r = (int) (pRGB[0] * 255);
            int g = (int) (pRGB[1] * 255);
            int b = (int) (pRGB[2] * 255);
            int color = ((a & 0xff) << 24) | ((r & 0xff) << 16) | ((g & 0xff) << 8) | (b & 0xff);
            return color;
        }

        // Full RAW->ARGB bitmap conversion kernel
        int convert_RAW_To_ARGB(int x, int y) {
            float[/*3*/] pRGB;
            int xP = x + mOffsetX;
            int yP = y + mOffsetY;
            if (xP == 0) xP = 1;
            if (yP == 0) yP = 1;
            if (xP == mInputWidth - 1) xP = mInputWidth - 2;
            if (yP == mInputHeight - 1) yP = mInputHeight - 2;

            if (mIsMonochrome) {
                float pixel = getInput(x, y);

                // Apply linearization and gain map
                float[/*4*/] gains = new float[]{1.f, 1.f, 1.f, 1.f};
                if (mHasGainMap) {
                    gains = getGain(xP, yP);
                }
                float bl = mBlackLevel[0];
                float g = gains[0];
                pixel = clamp(g * (pixel - bl) / (mWhiteLevel - bl), 0.f, 1.f);

                // Use same Y value for R, G, and B.
                pRGB = new float[3];
                pRGB[0] = pRGB[1] = pRGB[2] = pixel;

                // apply tonemap and gamma correction
                tonemap(pRGB);
                gammaCorrectPixel(pRGB);
            } else {
                float[] patch = new float[9];
                // TODO: Once ScriptGroup and RS kernels have been updated to allow for iteration
                // over 3x3 pixel patches, this can be optimized to avoid re-applying the
                // pre-demosaic steps for each pixel, potentially achieving a 9x speedup here.
                load3x3(xP, yP, /*out*/ patch);
                linearizeAndGainmap(xP, yP, /*inout*/patch);
                pRGB = demosaic(xP, yP, mCfaPattern, patch);
                applyColorspace(pRGB);
            }

            return packColorTo8888(pRGB);
        }

        void forEach_convert_RAW_To_ARGB(Bitmap argbOutput) {
            // Iterate over the output bitmap's dimensions; the output rectangle may be smaller
            // than the raw input, with mOffsetX/mOffsetY locating it within the raw image.
            for (int j = 0; j < argbOutput.getHeight(); j++) {
                for (int i = 0; i < argbOutput.getWidth(); i++) {
                    argbOutput.setPixel(i, j, convert_RAW_To_ARGB(i, j));
                }
            }
        }

    }

    /**
     * Convert a RAW16 buffer into an sRGB buffer, and write the result into a bitmap.
     *
     * <p> This function applies the operations roughly outlined in the Adobe DNG specification
     * using the provided metadata about the image sensor. Sensor data for Android devices is
     * assumed to be relatively linear, and no extra linearization step is applied here. The
     * following operations are applied in the given order:</p>
     *
     * <ul>
     *     <li>
     *         Black level subtraction - the black levels given in the SENSOR_BLACK_LEVEL_PATTERN
     *         tag are subtracted from the corresponding raw pixels.
     *     </li>
     *     <li>
     *         Rescaling - each raw pixel is scaled by 1/(white level - black level).
     *     </li>
     *     <li>
     *         Lens shading correction - the interpolated gains from the gain map defined in the
     *         STATISTICS_LENS_SHADING_CORRECTION_MAP are applied to each raw pixel.
     *     </li>
     *     <li>
     *         Clipping - each raw pixel is clipped to a range of [0.0, 1.0].
     *     </li>
     *     <li>
     *         Demosaic - the RGB channels for each pixel are retrieved from the Bayer mosaic
     *         of raw pixels using a simple bilinear-interpolation demosaicing algorithm.
     *     </li>
     *     <li>
     *         Colorspace transform to wide-gamut RGB - each pixel is mapped into a
     *         wide-gamut colorspace (in this case ProPhoto RGB is used) from the sensor
     *         colorspace.
     *     </li>
     *     <li>
     *         Tonemapping - A basic tonemapping curve using the default from ACR3 is applied
     *         (no further exposure compensation is applied here, though this could be improved).
     *     </li>
     *     <li>
     *         Colorspace transform to final RGB - each pixel is mapped into linear sRGB colorspace.
     *     </li>
     *     <li>
     *         Gamma correction - each pixel is gamma corrected using the sRGB encoding curve
     *         (approximately γ=2.2) to map into sRGB colorspace for viewing.
     *     </li>
     *     <li>
     *         Packing - each pixel is scaled so that each color channel has a range of [0, 255],
     *         and is packed into an Android bitmap.
     *     </li>
     * </ul>
     *
     * <p> Arguments given here are assumed to come from the values for the corresponding
     * {@link CameraCharacteristics.Key}s defined for the camera that produced this RAW16 buffer.
     * </p>
     * @param inputWidth width of the input RAW16 image in pixels.
     * @param inputHeight height of the input RAW16 image in pixels.
     * @param inputStride stride of the input RAW16 image in bytes.
     * @param rawImageInput a byte array containing a RAW16 image.
     * @param staticMetadata the {@link CameraCharacteristics} for this RAW capture.
     * @param dynamicMetadata the {@link CaptureResult} for this RAW capture.
     * @param outputOffsetX the offset width into the raw image of the left side of the output
     *                      rectangle.
     * @param outputOffsetY the offset height into the raw image of the top side of the output
     *                      rectangle.
     * @param argbOutput a {@link Bitmap} to output the rendered RAW image into.  The height and
     *                   width of this bitmap along with the output offsets are used to determine
     *                   the dimensions and offset of the output rectangle contained in the RAW
     *                   image to be rendered.
     */
    public static void convertToSRGB(int inputWidth, int inputHeight,
            int inputStride, byte[] rawImageInput, CameraCharacteristics staticMetadata,
            CaptureResult dynamicMetadata, int outputOffsetX, int outputOffsetY,
            /*out*/Bitmap argbOutput) {
        int cfa = staticMetadata.get(CameraCharacteristics.SENSOR_INFO_COLOR_FILTER_ARRANGEMENT);
        boolean isMono = (cfa == CameraCharacteristics.SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_MONO ||
                cfa == CameraCharacteristics.SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR);
        int[] blackLevelPattern = new int[4];
        staticMetadata.get(CameraCharacteristics.SENSOR_BLACK_LEVEL_PATTERN).
                copyTo(blackLevelPattern, /*offset*/0);
        int whiteLevel = staticMetadata.get(CameraCharacteristics.SENSOR_INFO_WHITE_LEVEL);

        LensShadingMap shadingMap = dynamicMetadata.get(
                CaptureResult.STATISTICS_LENS_SHADING_CORRECTION_MAP);

        DngBayerMetadata dngBayerMetadata = null;
        if (!isMono) {
            dngBayerMetadata = new DngBayerMetadata(staticMetadata, dynamicMetadata);
        }
        convertToSRGB(inputWidth, inputHeight, inputStride, cfa, blackLevelPattern,
                whiteLevel, rawImageInput, dngBayerMetadata,
                shadingMap, outputOffsetX, outputOffsetY, argbOutput);
    }

    /**
     * Convert a RAW16 buffer into an sRGB buffer, and write the result into a bitmap.
     *
     * @see #convertToSRGB
     */
    private static void convertToSRGB(int inputWidth, int inputHeight,
            int inputStride, int cfa, int[] blackLevelPattern, int whiteLevel, byte[] rawImageInput,
            DngBayerMetadata dngBayerMetadata, LensShadingMap lensShadingMap,
            int outputOffsetX, int outputOffsetY, /*out*/Bitmap argbOutput) {

        // Validate arguments
        if (argbOutput == null || rawImageInput == null) {
            throw new IllegalArgumentException("Null argument to convertToSRGB");
        }
        if (argbOutput.getConfig() != Bitmap.Config.ARGB_8888) {
            throw new IllegalArgumentException(
                    "Output bitmap passed to convertToSRGB is not ARGB_8888 format");
        }
        if (outputOffsetX < 0 || outputOffsetY < 0) {
            throw new IllegalArgumentException("Negative offset passed to convertToSRGB");
        }
        if ((inputStride / 2) < inputWidth) {
            throw new IllegalArgumentException("Stride too small.");
        }
        if ((inputStride % 2) != 0) {
            throw new IllegalArgumentException("Invalid stride for RAW16 format, see graphics.h.");
        }
        int outWidth = argbOutput.getWidth();
        int outHeight = argbOutput.getHeight();
        if (outWidth + outputOffsetX > inputWidth || outHeight + outputOffsetY > inputHeight) {
            throw new IllegalArgumentException("Raw image with dimensions (w=" + inputWidth +
                    ", h=" + inputHeight + "), cannot be converted into sRGB image with dimensions (w="
                    + outWidth + ", h=" + outHeight + ").");
        }
        if (cfa < 0 || cfa > 5) {
            throw new IllegalArgumentException("Unsupported cfa pattern " + cfa + " used.");
        }
        if (DEBUG) {
            Log.d(TAG, "Metadata Used:");
            Log.d(TAG, "Input width,height: " + inputWidth + "," + inputHeight);
            Log.d(TAG, "Output offset x,y: " + outputOffsetX + "," + outputOffsetY);
            Log.d(TAG, "Output width,height: " + outWidth + "," + outHeight);
            Log.d(TAG, "CFA: " + cfa);
            Log.d(TAG, "BlackLevelPattern: " + Arrays.toString(blackLevelPattern));
            Log.d(TAG, "WhiteLevel: " + whiteLevel);
        }

        float[] sensorToProPhoto = new float[9];
        float[] proPhotoToSRGB = new float[9];
        if (dngBayerMetadata != null) {
            float[] normalizedForwardTransform1 = Arrays.copyOf(dngBayerMetadata.forwardTransform1,
                    dngBayerMetadata.forwardTransform1.length);
            normalizeFM(normalizedForwardTransform1);
            float[] normalizedForwardTransform2 = Arrays.copyOf(dngBayerMetadata.forwardTransform2,
                    dngBayerMetadata.forwardTransform2.length);
            normalizeFM(normalizedForwardTransform2);

            float[] normalizedColorMatrix1 = Arrays.copyOf(dngBayerMetadata.colorMatrix1,
                    dngBayerMetadata.colorMatrix1.length);
            normalizeCM(normalizedColorMatrix1);
            float[] normalizedColorMatrix2 = Arrays.copyOf(dngBayerMetadata.colorMatrix2,
                    dngBayerMetadata.colorMatrix2.length);
            normalizeCM(normalizedColorMatrix2);

            if (DEBUG) {
                Log.d(TAG, "ReferenceIlluminant1: " + dngBayerMetadata.referenceIlluminant1);
                Log.d(TAG, "ReferenceIlluminant2: " + dngBayerMetadata.referenceIlluminant2);
                Log.d(TAG, "CalibrationTransform1: "
                        + Arrays.toString(dngBayerMetadata.calibrationTransform1));
                Log.d(TAG, "CalibrationTransform2: "
                        + Arrays.toString(dngBayerMetadata.calibrationTransform2));
                Log.d(TAG, "ColorMatrix1: "
                        + Arrays.toString(dngBayerMetadata.colorMatrix1));
                Log.d(TAG, "ColorMatrix2: "
                        + Arrays.toString(dngBayerMetadata.colorMatrix2));
                Log.d(TAG, "ForwardTransform1: "
                        + Arrays.toString(dngBayerMetadata.forwardTransform1));
                Log.d(TAG, "ForwardTransform2: "
                        + Arrays.toString(dngBayerMetadata.forwardTransform2));
                Log.d(TAG, "NeutralColorPoint: "
                        + Arrays.toString(dngBayerMetadata.neutralColorPoint));

                Log.d(TAG, "Normalized ForwardTransform1: "
                        + Arrays.toString(normalizedForwardTransform1));
                Log.d(TAG, "Normalized ForwardTransform2: "
                        + Arrays.toString(normalizedForwardTransform2));
                Log.d(TAG, "Normalized ColorMatrix1: "
                        + Arrays.toString(normalizedColorMatrix1));
                Log.d(TAG, "Normalized ColorMatrix2: "
                        + Arrays.toString(normalizedColorMatrix2));
            }

            // Calculate full sensor colorspace to sRGB colorspace transform.
            double interpolationFactor = findDngInterpolationFactor(
                    dngBayerMetadata.referenceIlluminant1, dngBayerMetadata.referenceIlluminant2,
                    dngBayerMetadata.calibrationTransform1, dngBayerMetadata.calibrationTransform2,
                    normalizedColorMatrix1, normalizedColorMatrix2,
                    dngBayerMetadata.neutralColorPoint);
            if (DEBUG) Log.d(TAG, "Interpolation factor used: " + interpolationFactor);
            float[] sensorToXYZ = new float[9];
            calculateCameraToXYZD50Transform(normalizedForwardTransform1,
                    normalizedForwardTransform2,
                    dngBayerMetadata.calibrationTransform1, dngBayerMetadata.calibrationTransform2,
                    dngBayerMetadata.neutralColorPoint,
                    interpolationFactor, /*out*/sensorToXYZ);
            if (DEBUG) Log.d(TAG, "CameraToXYZ xform used: " + Arrays.toString(sensorToXYZ));
            multiply(sXYZtoProPhoto, sensorToXYZ, /*out*/sensorToProPhoto);
            if (DEBUG) {
                Log.d(TAG, "CameraToIntermediate xform used: " + Arrays.toString(sensorToProPhoto));
            }
            multiply(sXYZtoRGBBradford, sProPhotoToXYZ, /*out*/proPhotoToSRGB);
        }

        ConverterKernel converterKernel = new ConverterKernel();
        converterKernel.set_inputRawBuffer(rawImageInput);
        converterKernel.set_whiteLevel(whiteLevel);
        converterKernel.set_offsetX(outputOffsetX);
        converterKernel.set_offsetY(outputOffsetY);
        converterKernel.set_rawHeight(inputHeight);
        converterKernel.set_rawWidth(inputWidth);
        converterKernel.set_rawStride(inputStride);
        converterKernel.set_toneMapCoeffs(DEFAULT_ACR3_TONEMAP_CURVE_COEFFS);
        converterKernel.set_hasGainMap(lensShadingMap != null);
        if (lensShadingMap != null) {
            float[] gainMap = new float[lensShadingMap.getGainFactorCount()];
            lensShadingMap.copyGainFactors(/*inout*/gainMap, /*offset*/0);
            converterKernel.set_gainMapWidth(lensShadingMap.getColumnCount());
            converterKernel.set_gainMapHeight(lensShadingMap.getRowCount());
            converterKernel.set_gainMap(gainMap);
        }

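        // For monochrome and NIR sensors there is no DNG color metadata (dngBayerMetadata is
        // null), so the kernel skips the demosaic and color transform path and only applies
        // linearization, the gain map, tonemapping, and gamma encoding.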
        converterKernel.set_isMonochrome(dngBayerMetadata == null);
        if (dngBayerMetadata != null) {
            converterKernel.set_sensorToIntermediate(sensorToProPhoto);
            converterKernel.set_intermediateToSRGB(proPhotoToSRGB);
            converterKernel.set_neutralPoint(
                    new float[]{dngBayerMetadata.neutralColorPoint[0].floatValue(),
                            dngBayerMetadata.neutralColorPoint[1].floatValue(),
                            dngBayerMetadata.neutralColorPoint[2].floatValue()});
        }

        converterKernel.set_cfaPattern(cfa);
        converterKernel.set_blackLevelPattern(blackLevelPattern);
        converterKernel.forEach_convert_RAW_To_ARGB(argbOutput);
    }

    /**
     * Calculate the correlated color temperature (CCT) for a given x,y chromaticity in CIE 1931
     * x,y chromaticity space using McCamy's cubic approximation algorithm given in:
     *
     * McCamy, Calvin S. (April 1992).
     * "Correlated color temperature as an explicit function of chromaticity coordinates".
     * Color Research & Application 17 (2): 142–144
     *
     * @param x x chromaticity component.
     * @param y y chromaticity component.
     *
     * @return the CCT associated with this chromaticity coordinate.
     */
    private static double calculateColorTemperature(double x, double y) {
        double n = (x - 0.332) / (y - 0.1858);
        return -449 * Math.pow(n, 3) + 3525 * Math.pow(n, 2) - 6823.3 * n + 5520.33;
    }

    /**
     * Calculate the x,y chromaticity coordinates in CIE 1931 x,y chromaticity space from the given
     * CIE XYZ coordinates.
     *
     * @param X the CIE XYZ X coordinate.
     * @param Y the CIE XYZ Y coordinate.
     * @param Z the CIE XYZ Z coordinate.
     *
     * @return the [x, y] chromaticity coordinates as doubles.
     */
    private static double[] calculateCIExyCoordinates(double X, double Y, double Z) {
        double[] ret = new double[] { 0, 0 };
        ret[0] = X / (X + Y + Z);
        ret[1] = Y / (X + Y + Z);
        return ret;
    }

    /**
     * Linearly interpolate between a and b given fraction f.
     *
     * @param a first term to interpolate between, a will be returned when f == 0.
     * @param b second term to interpolate between, b will be returned when f == 1.
     * @param f the fraction to interpolate by.
     *
     * @return interpolated result as double.
     */
    private static double lerp(double a, double b, double f) {
        return (a * (1.0f - f)) + (b * f);
    }

    /**
     * Linearly interpolate between 3x3 matrices a and b given fraction f.
     *
     * @param a first 3x3 matrix to interpolate between, a will be returned when f == 0.
     * @param b second 3x3 matrix to interpolate between, b will be returned when f == 1.
     * @param f the fraction to interpolate by.
     * @param result will be set to contain the interpolated matrix.
     */
    private static void lerp(float[] a, float[] b, double f, /*out*/float[] result) {
        for (int i = 0; i < 9; i++) {
            result[i] = (float) lerp(a[i], b[i], f);
        }
    }

    /**
     * Find the interpolation factor to use with the RAW matrices given a neutral color point.
     *
     * @param referenceIlluminant1 first reference illuminant.
     * @param referenceIlluminant2 second reference illuminant.
     * @param calibrationTransform1 calibration matrix corresponding to the first reference
     *                              illuminant.
     * @param calibrationTransform2 calibration matrix corresponding to the second reference
     *                              illuminant.
     * @param colorMatrix1 color matrix corresponding to the first reference illuminant.
     * @param colorMatrix2 color matrix corresponding to the second reference illuminant.
     * @param neutralColorPoint the neutral color point used to calculate the interpolation factor.
     *
     * @return the interpolation factor corresponding to the given neutral color point.
     */
    private static double findDngInterpolationFactor(int referenceIlluminant1,
            int referenceIlluminant2, float[] calibrationTransform1, float[] calibrationTransform2,
            float[] colorMatrix1, float[] colorMatrix2, Rational[/*3*/] neutralColorPoint) {

        int colorTemperature1 = sStandardIlluminants.get(referenceIlluminant1, NO_ILLUMINANT);
        if (colorTemperature1 == NO_ILLUMINANT) {
            throw new IllegalArgumentException("No such illuminant for reference illuminant 1: " +
                    referenceIlluminant1);
        }

        int colorTemperature2 = sStandardIlluminants.get(referenceIlluminant2, NO_ILLUMINANT);
        if (colorTemperature2 == NO_ILLUMINANT) {
            throw new IllegalArgumentException("No such illuminant for reference illuminant 2: " +
                    referenceIlluminant2);
        }

        if (DEBUG) Log.d(TAG, "ColorTemperature1: " + colorTemperature1);
        if (DEBUG) Log.d(TAG, "ColorTemperature2: " + colorTemperature2);

        double interpFactor = 0.5; // Initial guess for interpolation factor
        double oldInterpFactor = interpFactor;

        double lastDiff = Double.MAX_VALUE;
        double tolerance = 0.0001;
        float[] XYZToCamera1 = new float[9];
        float[] XYZToCamera2 = new float[9];
        multiply(calibrationTransform1, colorMatrix1, /*out*/XYZToCamera1);
        multiply(calibrationTransform2, colorMatrix2, /*out*/XYZToCamera2);

        float[] cameraNeutral = new float[] { neutralColorPoint[0].floatValue(),
                neutralColorPoint[1].floatValue(), neutralColorPoint[2].floatValue()};

        float[] neutralGuess = new float[3];
        float[] interpXYZToCamera = new float[9];
        float[] interpXYZToCameraInverse = new float[9];

        double lower = Math.min(colorTemperature1, colorTemperature2);
        double upper = Math.max(colorTemperature1, colorTemperature2);

        if (DEBUG) {
            Log.d(TAG, "XYZtoCamera1: " + Arrays.toString(XYZToCamera1));
            Log.d(TAG, "XYZtoCamera2: " + Arrays.toString(XYZToCamera2));
            Log.d(TAG, "Finding interpolation factor, initial guess 0.5...");
        }
        // Iteratively guess xy value, find new CCT, and update interpolation factor.
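        // Each iteration: interpolate the two XYZ-to-camera matrices with the current guess,
        // map the camera neutral back to XYZ to estimate its CCT, re-derive the interpolation
        // factor from inverse color temperatures (the DNG-style inverse-CCT weighting), and
        // average it with the previous guess to damp oscillation.  The loop ends once the
        // factor changes by less than the tolerance or the iteration cap is reached.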
        int loopLimit = 30;
        int count = 0;
        while (lastDiff > tolerance && loopLimit > 0) {
            if (DEBUG) Log.d(TAG, "Loop count " + count);
            lerp(XYZToCamera1, XYZToCamera2, interpFactor, interpXYZToCamera);
            if (!invert(interpXYZToCamera, /*out*/interpXYZToCameraInverse)) {
                throw new IllegalArgumentException(
                        "Cannot invert XYZ to Camera matrix, input matrices are invalid.");
            }

            map(interpXYZToCameraInverse, cameraNeutral, /*out*/neutralGuess);
            double[] xy = calculateCIExyCoordinates(neutralGuess[0], neutralGuess[1],
                    neutralGuess[2]);

            double colorTemperature = calculateColorTemperature(xy[0], xy[1]);

            if (colorTemperature <= lower) {
                interpFactor = 1;
            } else if (colorTemperature >= upper) {
                interpFactor = 0;
            } else {
                double invCT = 1.0 / colorTemperature;
                interpFactor = (invCT - 1.0 / upper) / (1.0 / lower - 1.0 / upper);
            }

            if (lower == colorTemperature1) {
                interpFactor = 1.0 - interpFactor;
            }

            interpFactor = (interpFactor + oldInterpFactor) / 2;
            lastDiff = Math.abs(oldInterpFactor - interpFactor);
            oldInterpFactor = interpFactor;
            loopLimit--;
            count++;

            if (DEBUG) {
                Log.d(TAG, "CameraToXYZ chosen: " + Arrays.toString(interpXYZToCameraInverse));
                Log.d(TAG, "XYZ neutral color guess: " + Arrays.toString(neutralGuess));
                Log.d(TAG, "xy coordinate: " + Arrays.toString(xy));
                Log.d(TAG, "xy color temperature: " + colorTemperature);
                Log.d(TAG, "New interpolation factor: " + interpFactor);
            }
        }

        if (loopLimit == 0) {
            Log.w(TAG, "Could not converge on interpolation factor, using factor " + interpFactor +
                    " with remaining error factor of " + lastDiff);
        }
        return interpFactor;
    }

    /**
     * Calculate the transform from the raw camera sensor colorspace to CIE XYZ colorspace with a
     * D50 whitepoint.
     *
     * @param forwardTransform1 forward transform matrix corresponding to the first reference
     *                          illuminant.
     * @param forwardTransform2 forward transform matrix corresponding to the second reference
     *                          illuminant.
     * @param calibrationTransform1 calibration transform matrix corresponding to the first
     *                              reference illuminant.
     * @param calibrationTransform2 calibration transform matrix corresponding to the second
     *                              reference illuminant.
     * @param neutralColorPoint the neutral color point used to calculate the interpolation factor.
     * @param interpolationFactor the interpolation factor to use for the forward and
     *                            calibration transforms.
     * @param outputTransform set to the full sensor to XYZ colorspace transform.
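     *                        Computed as (interpolated ForwardTransform) x (reference neutral
     *                        diagonal) x (interpolated CalibrationTransform)^-1, following the
     *                        camera-to-XYZ (D50) mapping outlined in the DNG specification.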
     */
    private static void calculateCameraToXYZD50Transform(float[] forwardTransform1,
            float[] forwardTransform2, float[] calibrationTransform1, float[] calibrationTransform2,
            Rational[/*3*/] neutralColorPoint, double interpolationFactor,
            /*out*/float[] outputTransform) {
        float[] cameraNeutral = new float[] { neutralColorPoint[0].floatValue(),
                neutralColorPoint[1].floatValue(), neutralColorPoint[2].floatValue()};
        if (DEBUG) Log.d(TAG, "Camera neutral: " + Arrays.toString(cameraNeutral));

        float[] interpolatedCC = new float[9];
        lerp(calibrationTransform1, calibrationTransform2, interpolationFactor,
                interpolatedCC);
        float[] inverseInterpolatedCC = new float[9];
        if (!invert(interpolatedCC, /*out*/inverseInterpolatedCC)) {
            throw new IllegalArgumentException("Cannot invert interpolated calibration transform" +
                    ", input matrices are invalid.");
        }
        if (DEBUG) Log.d(TAG, "Inverted interpolated CalibrationTransform: " +
                Arrays.toString(inverseInterpolatedCC));

        float[] referenceNeutral = new float[3];
        map(inverseInterpolatedCC, cameraNeutral, /*out*/referenceNeutral);
        if (DEBUG) Log.d(TAG, "Reference neutral: " + Arrays.toString(referenceNeutral));
        float maxNeutral = Math.max(Math.max(referenceNeutral[0], referenceNeutral[1]),
                referenceNeutral[2]);
        float[] D = new float[] { maxNeutral / referenceNeutral[0], 0, 0,
                0, maxNeutral / referenceNeutral[1], 0,
                0, 0, maxNeutral / referenceNeutral[2] };
        if (DEBUG) Log.d(TAG, "Reference Neutral Diagonal: " + Arrays.toString(D));

        float[] intermediate = new float[9];
        float[] intermediate2 = new float[9];

        lerp(forwardTransform1, forwardTransform2, interpolationFactor, /*out*/intermediate);
        if (DEBUG) Log.d(TAG, "Interpolated ForwardTransform: " + Arrays.toString(intermediate));

        multiply(D, inverseInterpolatedCC, /*out*/intermediate2);
        multiply(intermediate, intermediate2, /*out*/outputTransform);
    }

    /**
     * Map a 3d column vector using the given matrix.
     *
     * @param matrix float array containing 3x3 matrix to map vector by.
     * @param input 3 dimensional vector to map.
     * @param output 3 dimensional vector result.
     */
    private static void map(float[] matrix, float[] input, /*out*/float[] output) {
        output[0] = input[0] * matrix[0] + input[1] * matrix[1] + input[2] * matrix[2];
        output[1] = input[0] * matrix[3] + input[1] * matrix[4] + input[2] * matrix[5];
        output[2] = input[0] * matrix[6] + input[1] * matrix[7] + input[2] * matrix[8];
    }

    /**
     * Multiply two 3x3 matrices together: A * B
     *
     * @param a left matrix.
     * @param b right matrix.
     * @param output set to the result of multiplying A * B.
     */
    private static void multiply(float[] a, float[] b, /*out*/float[] output) {
        output[0] = a[0] * b[0] + a[1] * b[3] + a[2] * b[6];
        output[3] = a[3] * b[0] + a[4] * b[3] + a[5] * b[6];
        output[6] = a[6] * b[0] + a[7] * b[3] + a[8] * b[6];
        output[1] = a[0] * b[1] + a[1] * b[4] + a[2] * b[7];
        output[4] = a[3] * b[1] + a[4] * b[4] + a[5] * b[7];
        output[7] = a[6] * b[1] + a[7] * b[4] + a[8] * b[7];
        output[2] = a[0] * b[2] + a[1] * b[5] + a[2] * b[8];
        output[5] = a[3] * b[2] + a[4] * b[5] + a[5] * b[8];
        output[8] = a[6] * b[2] + a[7] * b[5] + a[8] * b[8];
    }

    /**
     * Transpose a 3x3 matrix in-place.
     *
     * @param m the matrix to transpose.
     * @return the transposed matrix.
     */
    private static float[] transpose(/*inout*/float[/*9*/] m) {
        float t = m[1];
        m[1] = m[3];
        m[3] = t;
        t = m[2];
        m[2] = m[6];
        m[6] = t;
        t = m[5];
        m[5] = m[7];
        m[7] = t;
        return m;
    }

    /**
     * Invert a 3x3 matrix, or return false if the matrix is singular.
     *
     * @param m matrix to invert.
     * @param output set the output to be the inverse of m.
     */
    private static boolean invert(float[] m, /*out*/float[] output) {
        double a00 = m[0];
        double a01 = m[1];
        double a02 = m[2];
        double a10 = m[3];
        double a11 = m[4];
        double a12 = m[5];
        double a20 = m[6];
        double a21 = m[7];
        double a22 = m[8];

        double t00 = a11 * a22 - a21 * a12;
        double t01 = a21 * a02 - a01 * a22;
        double t02 = a01 * a12 - a11 * a02;
        double t10 = a20 * a12 - a10 * a22;
        double t11 = a00 * a22 - a20 * a02;
        double t12 = a10 * a02 - a00 * a12;
        double t20 = a10 * a21 - a20 * a11;
        double t21 = a20 * a01 - a00 * a21;
        double t22 = a00 * a11 - a10 * a01;

        double det = a00 * t00 + a01 * t10 + a02 * t20;
        if (Math.abs(det) < 1e-9) {
            return false; // Determinant too close to zero, not invertible.
        }

        output[0] = (float) (t00 / det);
        output[1] = (float) (t01 / det);
        output[2] = (float) (t02 / det);
        output[3] = (float) (t10 / det);
        output[4] = (float) (t11 / det);
        output[5] = (float) (t12 / det);
        output[6] = (float) (t20 / det);
        output[7] = (float) (t21 / det);
        output[8] = (float) (t22 / det);
        return true;
    }

    /**
     * Scale each element in a matrix by the given scaling factor.
     *
     * @param factor factor to scale by.
     * @param matrix the float array containing a 3x3 matrix to scale.
     */
    private static void scale(float factor, /*inout*/float[] matrix) {
        for (int i = 0; i < 9; i++) {
            matrix[i] *= factor;
        }
    }

    /**
     * Clamp a value to a given range.
     *
     * @param low lower bound to clamp to.
     * @param high higher bound to clamp to.
     * @param value the value to clamp.
     * @return the clamped value.
     */
    private static double clamp(double low, double high, double value) {
        return Math.max(low, Math.min(high, value));
    }

    /**
     * Return the max float in the array.
     *
     * @param array array of floats to search.
     * @return max float in the array.
     */
    private static float max(float[] array) {
        float val = array[0];
        for (float f : array) {
            val = (f > val) ? f : val;
        }
        return val;
    }

    /**
     * Normalize ColorMatrix to eliminate headroom for input space scaled to [0, 1] using
     * the D50 whitepoint.  This maps the D50 whitepoint into the colorspace used by the
     * ColorMatrix, then uses the resulting whitepoint to renormalize the ColorMatrix so
     * that the channel values in the resulting whitepoint for this operation are clamped
     * to the range [0, 1].
     *
     * @param colorMatrix a 3x3 matrix containing a DNG ColorMatrix to be normalized.
     */
    private static void normalizeCM(/*inout*/float[] colorMatrix) {
        float[] tmp = new float[3];
        map(colorMatrix, D50_XYZ, /*out*/tmp);
        float maxVal = max(tmp);
        if (maxVal > 0) {
            scale(1.0f / maxVal, colorMatrix);
        }
    }

    /**
     * Normalize ForwardMatrix to ensure that sensor whitepoint [1, 1, 1] maps to D50 in CIE XYZ
     * colorspace.
     *
     * @param forwardMatrix a 3x3 matrix containing a DNG ForwardTransform to be normalized.
     */
    private static void normalizeFM(/*inout*/float[] forwardMatrix) {
        float[] tmp = new float[] {1, 1, 1};
        float[] xyz = new float[3];
        map(forwardMatrix, tmp, /*out*/xyz);

        float[] intermediate = new float[9];
        float[] m = new float[] {1.0f / xyz[0], 0, 0, 0, 1.0f / xyz[1], 0, 0, 0, 1.0f / xyz[2]};

        multiply(m, forwardMatrix, /*out*/ intermediate);
        float[] m2 = new float[] {D50_XYZ[0], 0, 0, 0, D50_XYZ[1], 0, 0, 0, D50_XYZ[2]};
        multiply(m2, intermediate, /*out*/forwardMatrix);
    }
}
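
/*
 * Illustrative usage sketch (not part of the converter itself; the capture plumbing and the
 * variable names below are assumptions for the example).  A test that has captured a RAW16
 * buffer together with the camera's CameraCharacteristics and the capture's CaptureResult
 * could render the full image roughly as follows:
 *
 *     Bitmap argbOutput = Bitmap.createBitmap(rawWidth, rawHeight, Bitmap.Config.ARGB_8888);
 *     RawConverter.convertToSRGB(rawWidth, rawHeight, rawStrideBytes, rawBytes,
 *             characteristics, captureResult, 0, 0, argbOutput);
 */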