nerfbaselines.viewer

nerfbaselines.viewer.deserialize_nb_info(info: dict) → dict[source]
nerfbaselines.viewer.get_c2w(camera)[source]
nerfbaselines.viewer.get_orientation_transform(poses)[source]
nerfbaselines.viewer.get_position_quaternion(c2s)[source]
nerfbaselines.viewer.handle_cli_error(fn)[source]
nerfbaselines.viewer.open_any_directory(path: str | Path, mode: Literal['r', 'w'] = 'r') → Iterator[str][source]
nerfbaselines.viewer.qvec2rotmat(qvec)[source]
nerfbaselines.viewer.rotation_matrix(a, b)[source]

Compute the rotation matrix that rotates vector a to vector b.

Parameters:
  • a – The vector to rotate.

  • b – The vector to rotate to.

Returns:

The rotation matrix.

nerfbaselines.viewer.rotmat2qvec(R)[source]
nerfbaselines.viewer.setup_logging(verbose: bool | Literal['disabled'])[source]

nerfbaselines.viewer.viser

class nerfbaselines.viewer.viser.BindableSource(getter, update, on_update)[source]

Bases: object

map(fn, fn_back=None)[source]
with_default(default)[source]
class nerfbaselines.viewer.viser.BindableViserServer(server: ViserServer)[source]

Bases: ViserServer

exception nerfbaselines.viewer.viser.CancelledException[source]

Bases: Exception

class nerfbaselines.viewer.viser.Dataset[source]

Bases: _IncompleteDataset

cameras: Cameras
image_paths: List[str]
image_paths_root: str
images: ndarray | List[ndarray]
images_points3D_indices: List[ndarray] | None
metadata: Dict
points3D_rgb: ndarray | None
points3D_xyz: ndarray | None
sampling_mask_paths: List[str] | None
sampling_mask_paths_root: str | None
sampling_masks: ndarray | List[ndarray] | None
class nerfbaselines.viewer.viser.EventCancellationToken(*args, **kwargs)[source]

Bases: CancellationToken

cancel()[source]
property cancelled
class nerfbaselines.viewer.viser.Keyframe(position: numpy.ndarray, wxyz: numpy.ndarray, fov: float | None = None, transition_duration: float | None = None, appearance_train_index: int | None = None)[source]

Bases: object

appearance_train_index: int | None = None
fov: float | None = None
position: ndarray
transition_duration: float | None = None
wxyz: ndarray
class nerfbaselines.viewer.viser.KochanekBartelsInterpolationSource[source]

Bases: TypedDict

default_appearance: NotRequired[TrajectoryFrameAppearance | None]
default_fov: float
default_transition_duration: float
interpolation: Literal['kochanek-bartels']
is_cycle: bool
keyframes: List[TrajectoryKeyframe]
tension: float
type: Literal['interpolation']
class nerfbaselines.viewer.viser.Trajectory[source]

Bases: TypedDict

appearances: NotRequired[List[TrajectoryFrameAppearance]]
camera_model: Literal['pinhole', 'opencv', 'opencv_fisheye', 'full_opencv']
fps: float
frames: List[TrajectoryFrame]
image_size: Tuple[int, int]
source: NotRequired[ImageSetInterpolationSource | KochanekBartelsInterpolationSource | None]
class nerfbaselines.viewer.viser.TrajectoryFrame[source]

Bases: TypedDict

appearance_weights: NotRequired[ndarray]
intrinsics: ndarray
pose: ndarray
class nerfbaselines.viewer.viser.TrajectoryFrameAppearance[source]

Bases: TypedDict

embedding: ndarray | None
embedding_train_index: int | None
class nerfbaselines.viewer.viser.TrajectoryKeyframe[source]

Bases: TypedDict

appearance: NotRequired[TrajectoryFrameAppearance]
fov: float | None
pose: ndarray
transition_duration: NotRequired[float | None]
class nerfbaselines.viewer.viser.ViewerRenderer(method: Method | None, expected_depth_scale=0.5)[source]

Bases: object

add_render_video_task(trajectory: Trajectory, callback, error_callback=None)[source]
property output_type_options
render(camera, *, embedding=None, allow_cancel=False, output_type=None, background_color=None, split_output_type: str | None = None, split_percentage: float | None = 0.5, output_aspect_ratio: float | None = None)[source]
update()[source]
class nerfbaselines.viewer.viser.ViewerState(resolution: int = 512, background_color: Tuple[int, int, int] = (38, 42, 55), output_type: Optional[str] = 'color', output_type_options: Tuple[str, ...] = ('color', ), composite_depth: bool = False, output_split: bool = False, split_percentage: float = 0.5, split_output_type: Optional[str] = None, show_train_cameras: bool = False, show_test_cameras: bool = False, show_input_points: bool = True, fps: str = '', preview_render: bool = False, preview_time: float = 0.0, preview_current_frame: int = 0, preview_is_playing: bool = False, render_resolution: Tuple[int, int] = (1920, 1080), render_fov: float = 75.0, render_appearance_train_index: Optional[int] = None, _temporary_appearance_train_index: Optional[int] = None, camera_path_interpolation: str = 'kochanek-bartels', camera_path_loop: bool = False, camera_path_tension: float = 0.0, camera_path_keyframes: Tuple[nerfbaselines.viewer.viser.Keyframe, ...] = (), camera_path_default_transition_duration: float = 2.0, camera_path_framerate: float = 30.0, camera_path_show_keyframes: bool = True, camera_path_move_keyframes: bool = False, camera_path_show_spline: bool = True, input_points: Optional[Tuple[numpy.ndarray, Optional[numpy.ndarray]]] = None, camera_frustums_train: Optional[Any] = None, camera_frustums_test: Optional[Any] = None, image_names_train: Tuple[str, ...] = (), supports_appearance_from_train_images: bool = False, _update_callbacks: List = <factory>)[source]

Bases: object

property b: BindableSource
background_color: Tuple[int, int, int] = (38, 42, 55)
camera_frustums_test: Any | None = None
camera_frustums_train: Any | None = None
camera_path_default_transition_duration: float = 2.0
camera_path_framerate: float = 30.0
camera_path_interpolation: str = 'kochanek-bartels'
camera_path_keyframes: Tuple[Keyframe, ...] = ()
camera_path_loop: bool = False
camera_path_move_keyframes: bool = False
camera_path_show_keyframes: bool = True
camera_path_show_spline: bool = True
camera_path_tension: float = 0.0
composite_depth: bool = False
fps: str = ''
get()[source]
get_trajectory(inv_transform) → Trajectory[source]
image_names_train: Tuple[str, ...] = ()
input_points: Tuple[ndarray, ndarray | None] | None = None
load_trajectory(trajectory: Trajectory, transform) → None[source]
on_update(callback)[source]
output_split: bool = False
output_type: str | None = 'color'
output_type_options: Tuple[str, ...] = ('color',)
preview_current_frame: int = 0
preview_is_playing: bool = False
preview_render: bool = False
preview_time: float = 0.0
render_appearance_train_index: int | None = None
render_fov: float = 75.0
render_resolution: Tuple[int, int] = (1920, 1080)
resolution: int = 512
show_input_points: bool = True
show_test_cameras: bool = False
show_train_cameras: bool = False
split_output_type: str | None = None
split_percentage: float = 0.5
supports_appearance_from_train_images: bool = False
update(**kwargs)[source]
class nerfbaselines.viewer.viser.ViserViewer(method: Method | None, port, dataset_metadata=None, state=None)[source]

Bases: object

add_dataset_views(dataset: Dataset, split: str)[source]
add_initial_point_cloud(points, colors)[source]
run()[source]
nerfbaselines.viewer.viser.apply_colormap(array: TTensor, *, pallete: str = 'viridis', invert: bool = False) → TTensor[source]
nerfbaselines.viewer.viser.apply_transform(transform, poses)[source]
nerfbaselines.viewer.viser.assert_not_none(value: T | None) → T[source]
nerfbaselines.viewer.viser.autobind(fn) → Callable[[BindableSource | ViewerState], Any][source]
nerfbaselines.viewer.viser.dataset_index_select(dataset: TDataset, i: slice | int | list | ndarray) → TDataset[source]
nerfbaselines.viewer.viser.dataset_load_features(dataset: UnloadedDataset, features=None, supported_camera_models=None) → Dataset[source]
nerfbaselines.viewer.viser.get_c2w(position, wxyz)[source]
nerfbaselines.viewer.viser.get_position_quaternion(c2s)[source]
nerfbaselines.viewer.viser.get_transform_and_scale(transform)[source]
nerfbaselines.viewer.viser.image_to_srgb(tensor, dtype, color_space: str | None = None, allow_alpha: bool = False, background_color: ndarray | None = None)[source]
nerfbaselines.viewer.viser.invert_transform(transform, has_scale=False)[source]
nerfbaselines.viewer.viser.load_dataset(path: Path | str, split: str, features: FrozenSet[Literal['color', 'points3D_xyz', 'points3D_rgb']] | None = None, supported_camera_models: FrozenSet[Literal['pinhole', 'opencv', 'opencv_fisheye', 'full_opencv']] | None = None, load_features: bool = True, **kwargs) → Dataset | UnloadedDataset[source]
nerfbaselines.viewer.viser.load_trajectory(file) → Trajectory[source]
nerfbaselines.viewer.viser.new_cameras(*, poses: ndarray, intrinsics: ndarray, camera_types: ndarray, distortion_parameters: ndarray, image_sizes: ndarray, nears_fars: ndarray | None = None, metadata: ndarray | None = None) → Cameras[source]
nerfbaselines.viewer.viser.pad_poses(p)[source]

Pad […, 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1].

nerfbaselines.viewer.viser.pad_to_aspect_ratio(img, aspect_ratio)[source]
nerfbaselines.viewer.viser.render_frames(method: Method, cameras: Cameras, output: str | Path, fps: float, embeddings: List[ndarray] | None = None, description: str = 'rendering frames', output_type: Literal['color', 'depth'] = 'color', nb_info: dict | None = None) → None[source]
nerfbaselines.viewer.viser.run_viser_viewer(method: Method | None = None, data=None, port=6006, nb_info=None)[source]
nerfbaselines.viewer.viser.safe_eq(a, b) → bool[source]

Check whether a and b are equal, even if they are numpy arrays.

nerfbaselines.viewer.viser.save_trajectory(trajectory: Trajectory, file) → None[source]
nerfbaselines.viewer.viser.simple_cache(fn)[source]
nerfbaselines.viewer.viser.state_compute_duration(state)[source]
nerfbaselines.viewer.viser.three_js_perspective_camera_focal_length(fov: float, image_height: int)[source]

Returns the focal length of a three.js perspective camera.

Parameters:
  • fov – the field of view of the camera in degrees.

  • image_height – the height of the image in pixels.

See:

https://github.com/nerfstudio-project/nerfstudio/blob/1aba4ea7a29b05e86f5d223245a573e7dcd86caa/nerfstudio/viewer_legacy/server/utils.py#L52

nerfbaselines.viewer.viser.trajectory_get_cameras(trajectory: Trajectory) → Cameras[source]
nerfbaselines.viewer.viser.trajectory_get_embeddings(method: Method, trajectory: Trajectory) → List[ndarray] | None[source]
nerfbaselines.viewer.viser.transform_points(transform, points)[source]
nerfbaselines.viewer.viser.visualize_depth(depth: ndarray, expected_scale: float | None = None, near_far: ndarray | None = None, pallete: str = 'viridis') → ndarray[source]