1# Copyright 2018 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14"""Verify multi-camera alignment using internal parameters."""
15
16
17import logging
18import math
19import os.path
20
21import cv2
22from mobly import test_runner
23import numpy as np
24
25import its_base_test
26import camera_properties_utils
27import capture_request_utils
28import image_processing_utils
29import its_session_utils
30import opencv_processing_utils
31
32_ALIGN_ATOL_MM = 5.0  # mm
33_ALIGN_RTOL = 0.01  # 1% of sensor diagonal in pixels
34_CHART_DISTANCE_RTOL = 0.1
35_CIRCLE_COLOR = 0  # [0: black, 255: white]
36_CIRCLE_MIN_AREA = 0.005  # multiplied by image size
37_CIRCLE_RTOL = 0.1  # 10%
38_CM_TO_M = 1E-2
39_M_TO_MM = 1E3
40_MM_TO_UM = 1E3
41_NAME = os.path.splitext(os.path.basename(__file__))[0]
42_REFERENCE_GYRO = 1
43_REFERENCE_UNDEFINED = 2
44_TEST_REQUIRED_MPC = 33
45_TRANS_MATRIX_REF = np.array([0, 0, 0])  # translation matrix for ref cam is 000
46
47
48def convert_cap_and_prep_img(cap, props, fmt, img_name):
49  """Convert the capture to an RGB image and prep image.
50
51  Args:
52    cap: capture element
53    props: dict of capture properties
54    fmt: capture format ('raw' or 'yuv')
55    img_name: name to save image as
56
57  Returns:
58    img uint8 numpy array
59  """
60
61  img = image_processing_utils.convert_capture_to_rgb_image(cap, props=props)
62
63  # save image
64  image_processing_utils.write_image(img, img_name)
65
66  # convert [0, 1] image to [0, 255] and cast as uint8
67  img = image_processing_utils.convert_image_to_uint8(img)
68
69  # scale to match calibration data if RAW
70  if fmt == 'raw':
71    img = cv2.resize(img, None, fx=2, fy=2)
72
73  return img
74
75
76def calc_pixel_size(props):
77  ar = props['android.sensor.info.pixelArraySize']
78  sensor_size = props['android.sensor.info.physicalSize']
79  pixel_size_w = sensor_size['width'] / ar['width']
80  pixel_size_h = sensor_size['height'] / ar['height']
81  logging.debug('pixel size(um): %.2f x %.2f',
82                pixel_size_w * _MM_TO_UM, pixel_size_h * _MM_TO_UM)
83  return (pixel_size_w + pixel_size_h) / 2 * _MM_TO_UM
84
85
86def select_ids_to_test(ids, props, chart_distance):
87  """Determine the best 2 cameras to test for the rig used.
88
89  Cameras are pre-filtered to only include supportable cameras.
90  Supportable cameras are: YUV(RGB), RAW(Bayer)
91
92  Args:
93    ids: unicode string; physical camera ids
94    props: dict; physical camera properties dictionary
95    chart_distance: float; distance to chart in meters
96  Returns:
97    test_ids to be tested
98  """
99  chart_distance = abs(chart_distance)*100  # convert M to CM
100  test_ids = []
101  for i in ids:
102    sensor_size = props[i]['android.sensor.info.physicalSize']
103    focal_l = props[i]['android.lens.info.availableFocalLengths'][0]
104    diag = math.sqrt(sensor_size['height'] ** 2 + sensor_size['width'] ** 2)
105    fov = round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2)
106    logging.debug('Camera: %s, FoV: %.2f, chart_distance: %.1fcm', i, fov,
107                  chart_distance)
108    # determine best combo with rig used or recommend different rig
109    if (opencv_processing_utils.FOV_THRESH_TELE < fov <
110        opencv_processing_utils.FOV_THRESH_UW):
111      test_ids.append(i)  # RFoV camera
112    elif fov < opencv_processing_utils.FOV_THRESH_TELE40:
113      logging.debug('Skipping camera. Not appropriate multi-camera testing.')
114      continue  # super-TELE camera
115    elif (fov <= opencv_processing_utils.FOV_THRESH_TELE and
116          math.isclose(chart_distance,
117                       opencv_processing_utils.CHART_DISTANCE_31CM,
118                       rel_tol=_CHART_DISTANCE_RTOL)):
119      test_ids.append(i)  # TELE camera in RFoV rig
120    elif (fov >= opencv_processing_utils.FOV_THRESH_UW and
121          math.isclose(chart_distance,
122                       opencv_processing_utils.CHART_DISTANCE_22CM,
123                       rel_tol=_CHART_DISTANCE_RTOL)):
124      test_ids.append(i)  # WFoV camera in WFoV rig
125    else:
126      logging.debug('Skipping camera. Not appropriate for test rig.')
127
128  if len(test_ids) < 2:
129    raise AssertionError('Error: started with 2+ cameras, reduced to <2 based '
130                         f'on FoVs. Wrong test rig? test_ids: {test_ids}')
131  return test_ids[0:2]
132
133
134def determine_valid_out_surfaces(cam, props, fmt, cap_camera_ids, sizes):
135  """Determine a valid output surfaces for captures.
136
137  Args:
138    cam:                obj; camera object
139    props:              dict; props for the physical cameras
140    fmt:                str; capture format ('yuv' or 'raw')
141    cap_camera_ids:     list; camera capture ids
142    sizes:              dict; valid physical sizes for the cap_camera_ids
143
144  Returns:
145    valid out_surfaces
146  """
147  valid_stream_combo = False
148
149  # try simultaneous capture
150  w, h = capture_request_utils.get_available_output_sizes('yuv', props)[0]
151  out_surfaces = [{'format': 'yuv', 'width': w, 'height': h},
152                  {'format': fmt, 'physicalCamera': cap_camera_ids[0],
153                   'width': sizes[cap_camera_ids[0]][0],
154                   'height': sizes[cap_camera_ids[0]][1]},
155                  {'format': fmt, 'physicalCamera': cap_camera_ids[1],
156                   'width': sizes[cap_camera_ids[1]][0],
157                   'height': sizes[cap_camera_ids[1]][1]},]
158  valid_stream_combo = cam.is_stream_combination_supported(out_surfaces)
159
160  # try each camera individually
161  if not valid_stream_combo:
162    out_surfaces = []
163    for cap_id in cap_camera_ids:
164      out_surface = {'format': fmt, 'physicalCamera': cap_id,
165                     'width': sizes[cap_id][0],
166                     'height': sizes[cap_id][1]}
167      valid_stream_combo = cam.is_stream_combination_supported(out_surface)
168      if valid_stream_combo:
169        out_surfaces.append(out_surface)
170      else:
171        camera_properties_utils.skip_unless(valid_stream_combo)
172
173  return out_surfaces
174
175
176def take_images(cam, caps, props, fmt, cap_camera_ids, out_surfaces):
177  """Capture images.
178
179  Args:
180    cam: obj; camera object
181    caps: dict; capture results indexed by (fmt, id)
182    props: dict; props for the physical cameras
183    fmt: str; capture format ('yuv' or 'raw')
184    cap_camera_ids: list; camera capture ids
185    out_surfaces: list; valid output surfaces for caps
186
187  Returns:
188    caps: dict; capture information indexed by (fmt, cap_id)
189  """
190
191  logging.debug('out_surfaces: %s', str(out_surfaces))
192  if len(out_surfaces) == 3:  # do simultaneous capture
193    # Do 3A without getting the values
194    cam.do_3a(lock_ae=True, lock_awb=True)
195    req = capture_request_utils.auto_capture_request(props=props, do_af=True)
196    _, caps[(fmt,
197             cap_camera_ids[0])], caps[(fmt,
198                                        cap_camera_ids[1])] = cam.do_capture(
199                                            req, out_surfaces)
200
201  else:  # step through cameras individually
202    for i, out_surface in enumerate(out_surfaces):
203      # Do 3A without getting the values
204      cam.do_3a(lock_ae=True, lock_awb=True)
205      req = capture_request_utils.auto_capture_request(props=props, do_af=True)
206      caps[(fmt, cap_camera_ids[i])] = cam.do_capture(req, out_surface)
207
208  return caps
209
210
211def undo_zoom(cap, circle):
212  """Correct coordinates and size of circle for zoom.
213
214  Assume that the maximum physical YUV image size is close to active array size.
215
216  Args:
217    cap: camera capture element
218    circle: dict of circle values
219  Returns:
220    unzoomed circle dict
221  """
222  yuv_w = cap['width']
223  yuv_h = cap['height']
224  logging.debug('cap size: %d x %d', yuv_w, yuv_h)
225  cr = cap['metadata']['android.scaler.cropRegion']
226  cr_w = cr['right'] - cr['left']
227  cr_h = cr['bottom'] - cr['top']
228
229  # Offset due to aspect ratio difference of crop region and yuv
230  # - fit yuv box inside of differently shaped cr box
231  yuv_aspect = yuv_w / yuv_h
232  relative_aspect = yuv_aspect / (cr_w/cr_h)
233  if relative_aspect > 1:
234    zoom_ratio = yuv_w / cr_w
235    yuv_x = 0
236    yuv_y = (cr_h - cr_w / yuv_aspect) / 2
237  else:
238    zoom_ratio = yuv_h / cr_h
239    yuv_x = (cr_w - cr_h * yuv_aspect) / 2
240    yuv_y = 0
241
242  circle['x'] = cr['left'] + yuv_x + circle['x'] / zoom_ratio
243  circle['y'] = cr['top'] + yuv_y + circle['y'] / zoom_ratio
244  circle['r'] = circle['r'] / zoom_ratio
245
246  logging.debug(' Calculated zoom ratio: %.3f', zoom_ratio)
247  logging.debug(' Corrected circle X: %.2f', circle['x'])
248  logging.debug(' Corrected circle Y: %.2f', circle['y'])
249  logging.debug(' Corrected circle radius: %.2f', circle['r'])
250
251  return circle
252
253
254def convert_to_world_coordinates(x, y, r, t, k, z_w):
255  """Convert x,y coordinates to world coordinates.
256
257  Conversion equation is:
258  A = [[x*r[2][0] - dot(k_row0, r_col0), x*r_[2][1] - dot(k_row0, r_col1)],
259       [y*r[2][0] - dot(k_row1, r_col0), y*r_[2][1] - dot(k_row1, r_col1)]]
260  b = [[z_w*dot(k_row0, r_col2) + dot(k_row0, t) - x*(r[2][2]*z_w + t[2])],
261       [z_w*dot(k_row1, r_col2) + dot(k_row1, t) - y*(r[2][2]*z_w + t[2])]]
262
263  [[x_w], [y_w]] = inv(A) * b
264
265  Args:
266    x: x location in pixel space
267    y: y location in pixel space
268    r: rotation matrix
269    t: translation matrix
270    k: intrinsic matrix
271    z_w: z distance in world space
272
273  Returns:
274    x_w:    x in meters in world space
275    y_w:    y in meters in world space
276  """
277  c_1 = r[2, 2] * z_w + t[2]
278  k_x1 = np.dot(k[0, :], r[:, 0])
279  k_x2 = np.dot(k[0, :], r[:, 1])
280  k_x3 = z_w * np.dot(k[0, :], r[:, 2]) + np.dot(k[0, :], t)
281  k_y1 = np.dot(k[1, :], r[:, 0])
282  k_y2 = np.dot(k[1, :], r[:, 1])
283  k_y3 = z_w * np.dot(k[1, :], r[:, 2]) + np.dot(k[1, :], t)
284
285  a = np.array([[x*r[2][0]-k_x1, x*r[2][1]-k_x2],
286                [y*r[2][0]-k_y1, y*r[2][1]-k_y2]])
287  b = np.array([[k_x3-x*c_1], [k_y3-y*c_1]])
288  return (float(x) for x in np.dot(np.linalg.inv(a), b))
289
290
291def convert_to_image_coordinates(p_w, r, t, k):
292  p_c = np.dot(r, p_w) + t
293  p_h = np.dot(k, p_c)
294  return p_h[0] / p_h[2], p_h[1] / p_h[2]
295
296
297def define_reference_camera(pose_reference, cam_reference):
298  """Determine the reference camera.
299
300  Args:
301    pose_reference: 0 for cameras, 1 for gyro
302    cam_reference: dict with key of physical camera and value True/False
303  Returns:
304    i_ref: physical id of reference camera
305    i_2nd: physical id of secondary camera
306  """
307
308  if pose_reference == _REFERENCE_GYRO:
309    logging.debug('pose_reference is GYRO')
310    keys = list(cam_reference.keys())
311    i_ref = keys[0]  # pick first camera as ref
312    i_2nd = keys[1]
313  else:
314    logging.debug('pose_reference is CAMERA')
315    i_refs = [k for (k, v) in cam_reference.items() if v]
316    i_ref = i_refs[0]
317    if len(i_refs) > 1:
318      logging.debug('Warning: more than 1 reference camera. Check translation '
319                    'matrices. cam_reference: %s', str(cam_reference))
320      i_2nd = i_refs[1]  # use second camera since both at same location
321    else:
322      i_2nd = next(k for (k, v) in cam_reference.items() if not v)
323  return i_ref, i_2nd
324
325
326class MultiCameraAlignmentTest(its_base_test.ItsBaseTest):
327
328  """Test the multi camera system parameters related to camera spacing.
329
330  Using the multi-camera physical cameras, take a picture of scene4
331  (a black circle and surrounding square on a white background) with
332  one of the physical cameras. Then find the circle center and radius. Using
333  the parameters:
334      android.lens.poseReference
335      android.lens.poseTranslation
336      android.lens.poseRotation
337      android.lens.instrinsicCalibration
338      android.lens.distortion (if available)
339  project the circle center to the world coordinates for each camera.
340  Compare the difference between the two cameras' circle centers in
341  world coordinates.
342
343  Reproject the world coordinates back to pixel coordinates and compare
344  against originals as a correctness check.
345
346  Compare the circle sizes if the focal lengths of the cameras are
347  different using
348      android.lens.availableFocalLengths.
349  """
350
351  def test_multi_camera_alignment(self):
352    # capture images
353    with its_session_utils.ItsSession(
354        device_id=self.dut.serial,
355        camera_id=self.camera_id,
356        hidden_physical_id=self.hidden_physical_id) as cam:
357      props = cam.get_camera_properties()
358      name_with_log_path = os.path.join(self.log_path, _NAME)
359      chart_distance = self.chart_distance * _CM_TO_M
360
361      # check media performance class
362      should_run = (camera_properties_utils.read_3a(props) and
363                    camera_properties_utils.per_frame_control(props) and
364                    camera_properties_utils.logical_multi_camera(props) and
365                    camera_properties_utils.backward_compatible(props))
366      media_performance_class = its_session_utils.get_media_performance_class(
367          self.dut.serial)
368      cameras_facing_same_direction = cam.get_facing_to_ids().get(
369          props['android.lens.facing'], [])
370      has_multiple_same_facing_cameras = len(cameras_facing_same_direction) > 1
371
372      if (media_performance_class >= _TEST_REQUIRED_MPC and
373          not should_run and
374          cam.is_primary_camera() and
375          has_multiple_same_facing_cameras and
376          (props['android.lens.facing'] ==
377           camera_properties_utils.LENS_FACING['BACK'])
378          ):
379        logging.error('Found multiple camera IDs %s facing in the same '
380                      'direction as primary camera %s.',
381                      cameras_facing_same_direction, self.camera_id)
382        its_session_utils.raise_mpc_assertion_error(
383            _TEST_REQUIRED_MPC, _NAME, media_performance_class)
384
385      # check SKIP conditions
386      camera_properties_utils.skip_unless(should_run)
387
388      # load chart for scene
389      its_session_utils.load_scene(
390          cam, props, self.scene, self.tablet, self.chart_distance)
391
392      pose_reference = props['android.lens.poseReference']
393
394      # Convert chart_distance for lens facing back
395      if (props['android.lens.facing'] ==
396          camera_properties_utils.LENS_FACING['BACK']):
397        # API spec defines +z is pointing out from screen
398        logging.debug('lens facing BACK')
399        chart_distance *= -1
400
401      # find physical camera IDs
402      ids = camera_properties_utils.logical_multi_camera_physical_ids(props)
403      physical_props = {}
404      physical_ids = []
405      physical_raw_ids = []
406      for i in ids:
407        physical_props[i] = cam.get_camera_properties_by_id(i)
408        if physical_props[i][
409            'android.lens.poseReference'] == _REFERENCE_UNDEFINED:
410          continue
411        # find YUV+RGB capable physical cameras
412        if (camera_properties_utils.backward_compatible(physical_props[i]) and
413            not camera_properties_utils.mono_camera(physical_props[i])):
414          physical_ids.append(i)
415        # find RAW+RGB capable physical cameras
416        if (camera_properties_utils.backward_compatible(physical_props[i]) and
417            not camera_properties_utils.mono_camera(physical_props[i]) and
418            camera_properties_utils.raw16(physical_props[i])):
419          physical_raw_ids.append(i)
420
421      # determine formats and select cameras
422      fmts = ['yuv']
423      if len(physical_raw_ids) >= 2:
424        fmts.insert(0, 'raw')  # add RAW to analysis if enough cameras
425        logging.debug('Selecting RAW+RGB supported cameras')
426        physical_raw_ids = select_ids_to_test(physical_raw_ids, physical_props,
427                                              chart_distance)
428      logging.debug('Selecting YUV+RGB cameras')
429      camera_properties_utils.skip_unless(len(physical_ids) >= 2)
430      physical_ids = select_ids_to_test(physical_ids, physical_props,
431                                        chart_distance)
432
433      # do captures for valid formats
434      caps = {}
435      for i, fmt in enumerate(fmts):
436        physical_sizes = {}
437        capture_cam_ids = physical_ids
438        fmt_code = capture_request_utils.FMT_CODE_YUV
439        if fmt == 'raw':
440          capture_cam_ids = physical_raw_ids
441          fmt_code = capture_request_utils.FMT_CODE_RAW
442        for physical_id in capture_cam_ids:
443          configs = physical_props[physical_id][
444              'android.scaler.streamConfigurationMap'][
445                  'availableStreamConfigurations']
446          fmt_configs = [cfg for cfg in configs if cfg['format'] == fmt_code]
447          out_configs = [cfg for cfg in fmt_configs if not cfg['input']]
448          out_sizes = [(cfg['width'], cfg['height']) for cfg in out_configs]
449          physical_sizes[physical_id] = max(out_sizes, key=lambda item: item[1])
450
451        out_surfaces = determine_valid_out_surfaces(
452            cam, props, fmt, capture_cam_ids, physical_sizes)
453        caps = take_images(
454            cam, caps, physical_props, fmt, capture_cam_ids, out_surfaces)
455
456    # process images for correctness
457    for j, fmt in enumerate(fmts):
458      size = {}
459      k = {}
460      cam_reference = {}
461      r = {}
462      t = {}
463      circle = {}
464      fl = {}
465      sensor_diag = {}
466      pixel_sizes = {}
467      capture_cam_ids = physical_ids
468      if fmt == 'raw':
469        capture_cam_ids = physical_raw_ids
470      logging.debug('Format: %s', str(fmt))
471      for i in capture_cam_ids:
472        # convert cap and prep image
473        img_name = f'{name_with_log_path}_{fmt}_{i}.jpg'
474        img = convert_cap_and_prep_img(
475            caps[(fmt, i)], physical_props[i], fmt, img_name)
476        size[i] = (caps[fmt, i]['width'], caps[fmt, i]['height'])
477
478        # load parameters for each physical camera
479        if j == 0:
480          logging.debug('Camera %s', i)
481        k[i] = camera_properties_utils.get_intrinsic_calibration(
482            physical_props[i], caps[fmt, i]['metadata'], j == 0)
483        r[i] = camera_properties_utils.get_rotation_matrix(
484            physical_props[i], j == 0)
485        t[i] = camera_properties_utils.get_translation_matrix(
486            physical_props[i], j == 0)
487
488        # API spec defines poseTranslation as the world coordinate p_w_cam of
489        # optics center. When applying [R|t] to go from world coordinates to
490        # camera coordinates, we need -R*p_w_cam of the coordinate reported in
491        # metadata.
492        # ie. for a camera with optical center at world coordinate (5, 4, 3)
493        # and identity rotation, to convert a world coordinate into the
494        # camera's coordinate, we need a translation vector of [-5, -4, -3]
495        # so that: [I|[-5, -4, -3]^T] * [5, 4, 3]^T = [0,0,0]^T
496        t[i] = -1.0 * np.dot(r[i], t[i])
497
498        if (t[i] == _TRANS_MATRIX_REF).all():
499          cam_reference[i] = True
500        else:
501          cam_reference[i] = False
502
503        # Correct lens distortion to image (if available) and save before/after
504        if (camera_properties_utils.distortion_correction(physical_props[i]) and
505            caps[fmt, i]['metadata'] and
506            fmt == 'raw'):
507          cv2_distort = camera_properties_utils.get_distortion_matrix(
508              physical_props[i])
509          if cv2_distort is None:
510            raise AssertionError(f'Camera {i} has no distortion matrix!')
511          if not np.any(cv2_distort):
512            logging.warning('Camera %s has distortion matrix of all zeroes', i)
513          image_processing_utils.write_image(
514              img/255, f'{name_with_log_path}_{fmt}_{i}.jpg')
515          img = cv2.undistort(img, k[i], cv2_distort)
516          image_processing_utils.write_image(
517              img/255, f'{name_with_log_path}_{fmt}_correct_{i}.jpg')
518
519        # Find the circles in grayscale image
520        circle[i] = opencv_processing_utils.find_circle(
521            img, f'{name_with_log_path}_{fmt}_gray_{i}.jpg',
522            _CIRCLE_MIN_AREA, _CIRCLE_COLOR)
523        logging.debug('Circle radius %s: %.2f', format(i), circle[i]['r'])
524
525        # Undo zoom to image (if applicable).
526        if fmt == 'yuv':
527          circle[i] = undo_zoom(caps[(fmt, i)], circle[i])
528
529        # Find focal length, pixel & sensor size
530        fl[i] = physical_props[i]['android.lens.info.availableFocalLengths'][0]
531        pixel_sizes[i] = calc_pixel_size(physical_props[i])
532        sensor_diag[i] = math.sqrt(size[i][0] ** 2 + size[i][1] ** 2)
533
534      i_ref, i_2nd = define_reference_camera(pose_reference, cam_reference)
535      logging.debug('reference camera: %s, secondary camera: %s', i_ref, i_2nd)
536
537      # Convert circle centers to real world coordinates
538      x_w = {}
539      y_w = {}
540      for i in [i_ref, i_2nd]:
541        x_w[i], y_w[i] = convert_to_world_coordinates(
542            circle[i]['x'], circle[i]['y'], r[i], t[i], k[i], chart_distance)
543
544      # Back convert to image coordinates for correctness check
545      x_p = {}
546      y_p = {}
547      x_p[i_2nd], y_p[i_2nd] = convert_to_image_coordinates(
548          [x_w[i_ref], y_w[i_ref], chart_distance], r[i_2nd], t[i_2nd],
549          k[i_2nd])
550      x_p[i_ref], y_p[i_ref] = convert_to_image_coordinates(
551          [x_w[i_2nd], y_w[i_2nd], chart_distance], r[i_ref], t[i_ref],
552          k[i_ref])
553
554      # Summarize results
555      for i in [i_ref, i_2nd]:
556        logging.debug(' Camera: %s', i)
557        logging.debug(' x, y (pixels): %.1f, %.1f', circle[i]['x'],
558                      circle[i]['y'])
559        logging.debug(' x_w, y_w (mm): %.2f, %.2f', x_w[i] * 1.0E3,
560                      y_w[i] * 1.0E3)
561        logging.debug(' x_p, y_p (pixels): %.1f, %.1f', x_p[i], y_p[i])
562
563      # Check center locations
564      err_mm = np.linalg.norm(np.array([x_w[i_ref], y_w[i_ref]]) -
565                              np.array([x_w[i_2nd], y_w[i_2nd]])) * _M_TO_MM
566      logging.debug('Center location err (mm): %.2f', err_mm)
567      if err_mm > _ALIGN_ATOL_MM:
568        raise AssertionError(
569            f'Centers {i_ref} <-> {i_2nd} too different! '
570            f'val={err_mm:.2f}, ATOL={_ALIGN_ATOL_MM} mm')
571
572      # Check projections back into pixel space
573      for i in [i_ref, i_2nd]:
574        err = np.linalg.norm(np.array([circle[i]['x'], circle[i]['y']]) -
575                             np.array([x_p[i], y_p[i]]).reshape(1, -1))
576        logging.debug('Camera %s projection error (pixels): %.1f', i, err)
577        tol = _ALIGN_RTOL * sensor_diag[i]
578        if err >= tol:
579          raise AssertionError(f'Camera {i} project location too different! '
580                               f'diff={err:.2f}, ATOL={tol:.2f} pixels')
581
582      # Check focal length and circle size if more than 1 focal length
583      if len(fl) > 1:
584        logging.debug('Circle radii (pixels); ref: %.1f, 2nd: %.1f',
585                      circle[i_ref]['r'], circle[i_2nd]['r'])
586        logging.debug('Focal lengths (diopters); ref: %.2f, 2nd: %.2f',
587                      fl[i_ref], fl[i_2nd])
588        logging.debug('Pixel size (um); ref: %.2f, 2nd: %.2f',
589                      pixel_sizes[i_ref], pixel_sizes[i_2nd])
590        if not math.isclose(circle[i_ref]['r']*pixel_sizes[i_ref]/fl[i_ref],
591                            circle[i_2nd]['r']*pixel_sizes[i_2nd]/fl[i_2nd],
592                            rel_tol=_CIRCLE_RTOL):
593          raise AssertionError(
594              f'Circle size scales improperly! RTOL: {_CIRCLE_RTOL} '
595              'Metric: radius*pixel_size/focal_length should be equal.')
596
597if __name__ == '__main__':
598  test_runner.main()
599