nerfbaselines

class nerfbaselines.Cameras(*args, **kwargs)[source]

Bases: GenericCameras[ndarray], Protocol

class nerfbaselines.Dataset[source]

Bases: _IncompleteDataset

cameras: Cameras
image_paths: List[str]
image_paths_root: str
images: ndarray | List[ndarray]
images_points3D_indices: List[ndarray] | None
metadata: Dict
points3D_rgb: ndarray | None
points3D_xyz: ndarray | None
sampling_mask_paths: List[str] | None
sampling_mask_paths_root: str | None
sampling_masks: ndarray | List[ndarray] | None
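
A minimal sketch of reading a loaded Dataset's documented fields. Fields are accessed dict-style, consistent with the Required/NotRequired annotations used on this page; describe_dataset is only an illustrative helper:

    from nerfbaselines import Dataset

    def describe_dataset(dataset: Dataset) -> None:
        # Summarize a loaded dataset using only the documented fields.
        print("images:", len(dataset["image_paths"]), "under", dataset["image_paths_root"])
        print("cameras:", dataset["cameras"].image_sizes.shape[0])
        if dataset["points3D_xyz"] is not None:  # the sparse point cloud is optional
            print("SfM points:", dataset["points3D_xyz"].shape)
        print("metadata keys:", sorted(dataset["metadata"]))
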
class nerfbaselines.DatasetLoaderSpec[source]

Bases: TypedDict

id: Required[str]
load_dataset_function: Required[str]
exception nerfbaselines.DatasetNotFoundError[source]

Bases: Exception

class nerfbaselines.DatasetSpec[source]

Bases: TypedDict

download_dataset_function: Required[str]
evaluation_protocol: str | EvaluationProtocolSpec
id: Required[str]
metadata: DatasetSpecMetadata
class nerfbaselines.DatasetSpecMetadata[source]

Bases: TypedDict

default_metric: str
description: str
id: str
licenses: List[LicenseSpec]
metrics: List[str]
name: str
paper_authors: List[str]
paper_title: str
scenes: List[Dict[str, str]]
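
For illustration, a DatasetSpec together with its DatasetSpecMetadata can be written as a plain dict literal. The "module:function" string in download_dataset_function, the metric names, and the scene list below are hypothetical placeholders:

    from nerfbaselines import DatasetSpec

    my_dataset_spec: DatasetSpec = {
        "id": "my-dataset",
        # String resolving to the dataset's download function (placeholder module path).
        "download_dataset_function": "my_package.my_dataset:download_dataset",
        "metadata": {
            "id": "my-dataset",
            "name": "My Dataset",
            "description": "A small example dataset.",
            "metrics": ["psnr", "ssim", "lpips"],
            "default_metric": "psnr",
            "scenes": [{"id": "scene1", "name": "Scene 1"}],
        },
    }
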
class nerfbaselines.DownloadDatasetFunction(*args, **kwargs)[source]

Bases: Protocol

class nerfbaselines.EvaluationProtocol(*args, **kwargs)[source]

Bases: Protocol

accumulate_metrics(metrics: Iterable[Dict[str, float | int]]) Dict[str, float | int][source]
evaluate(predictions: Dict[str, ndarray], dataset: Dataset) Dict[str, float | int][source]
get_name() str[source]
render(method: Method, dataset: Dataset, *, options: RenderOptions | None = None) Dict[str, ndarray][source]
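
A sketch of how these methods compose into an evaluation step, following only the signatures above (whether a call covers a single image or a whole split is up to the concrete protocol):

    from nerfbaselines import Dataset, EvaluationProtocol, Method

    def evaluate_with_protocol(protocol: EvaluationProtocol, method: Method, dataset: Dataset):
        # Render the dataset with the method under the protocol's rules ...
        predictions = protocol.render(method, dataset)
        # ... score the renders against the ground truth ...
        metrics = protocol.evaluate(predictions, dataset)
        # ... and merge per-image metrics into a single summary.
        return protocol.accumulate_metrics([metrics])
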
class nerfbaselines.EvaluationProtocolSpec[source]

Bases: TypedDict

evaluation_protocol_class: Required[str]
id: Required[str]
class nerfbaselines.GenericCameras(*args, **kwargs)[source]

Bases: Protocol[TTensor_co]

apply(fn: Callable[[TTensor_co, str], TTensor]) GenericCameras[TTensor][source]
property camera_models: TTensor_co

Camera models, [N]

classmethod cat(values: Sequence[Self]) Self[source]
property distortion_parameters: TTensor_co

Distortion parameters, [N, num_params]

property image_sizes: TTensor_co

Image sizes, [N, 2]

property intrinsics: TTensor_co

Intrinsics, [N, (fx,fy,cx,cy)]

item() Self[source]

Returns a single camera if there is only one. Otherwise raises an error.

property metadata: TTensor_co | None

Metadata, [N, …]

property nears_fars: TTensor_co | None

Near and far planes, [N, 2]

property poses: TTensor_co

Camera-to-world matrices, [N, (R, t)]

replace(**changes) Self[source]
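
A small sketch of the documented members (assuming cameras is an existing Cameras instance, e.g. one built with new_cameras further down this page):

    from nerfbaselines import Cameras

    def describe_cameras(cameras: Cameras) -> None:
        print("poses:", cameras.poses.shape)            # camera-to-world matrices
        print("intrinsics:", cameras.intrinsics.shape)  # [N, (fx, fy, cx, cy)]
        print("models:", cameras.camera_models)         # integer camera-model IDs
        no_meta = cameras.replace(metadata=None)        # functional update, returns a new instance
        both = type(cameras).cat([cameras, no_meta])    # concatenate two camera sets along N
        # item() returns the sole camera and raises if there is more than one.
        print("merged size:", both.image_sizes.shape[0])
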
class nerfbaselines.ImageSetInterpolationSource[source]

Bases: TypedDict

default_appearance: NotRequired[TrajectoryFrameAppearance | None]
default_fov: float
default_transition_duration: float
interpolation: Literal['none']
keyframes: List[TrajectoryKeyframe]
type: Literal['interpolation']
class nerfbaselines.KochanekBartelsInterpolationSource[source]

Bases: TypedDict

default_appearance: NotRequired[TrajectoryFrameAppearance | None]
default_fov: float
default_transition_duration: float
interpolation: Literal['kochanek-bartels']
is_cycle: bool
keyframes: List[TrajectoryKeyframe]
tension: float
type: Literal['interpolation']
class nerfbaselines.LicenseSpec[source]

Bases: TypedDict

name: Required[str]
url: str
class nerfbaselines.LoadDatasetFunction(*args, **kwargs)[source]

Bases: Protocol

class nerfbaselines.Logger(*args, **kwargs)[source]

Bases: Protocol

add_embedding(tag: str, embeddings: ndarray, step: int, *, images: List[ndarray] | None = None, labels: None | List[Dict[str, str]] | List[str] = None) None[source]
add_event(step: int) ContextManager[LoggerEvent][source]
add_image(tag: str, image: ndarray, step: int, *, display_name: str | None = None, description: str | None = None) None[source]
add_scalar(tag: str, value: float | int, step: int) None[source]
add_text(tag: str, text: str, step: int) None[source]
class nerfbaselines.LoggerEvent(*args, **kwargs)[source]

Bases: Protocol

add_embedding(tag: str, embeddings: ndarray, *, images: List[ndarray] | None = None, labels: None | List[Dict[str, str]] | List[str] = None) None[source]
add_histogram(tag: str, values: ndarray, *, num_bins: int | None = None) None[source]
add_image(tag: str, image: ndarray, display_name: str | None = None, description: str | None = None, **kwargs) None[source]
add_plot(tag: str, *data: ndarray, axes_labels: Sequence[str] | None = None, title: str | None = None, **kwargs) None[source]
add_scalar(tag: str, value: float | int) None[source]
add_text(tag: str, text: str) None[source]
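
A sketch of driving a Logger during training (assuming logger is a Logger instance obtained elsewhere, e.g. from a registered LoggerSpec); add_event groups several values under one step:

    from nerfbaselines import Logger

    def log_step(logger: Logger, step: int, loss: float, psnr: float) -> None:
        # Scalars can be logged directly with an explicit step ...
        logger.add_scalar("train/loss", loss, step)
        # ... or grouped into a single event so they share the same step.
        with logger.add_event(step) as event:
            event.add_scalar("train/psnr", psnr)
            event.add_text("train/status", "ok")
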
class nerfbaselines.LoggerSpec[source]

Bases: TypedDict

id: Required[str]
logger_class: Required[str]
class nerfbaselines.Method(*, checkpoint: str | None = None, train_dataset: Dataset | None = None, config_overrides: Dict[str, Any] | None = None)[source]

Bases: Protocol

abstract get_info() ModelInfo[source]

Get method defaults for the trainer.

Returns:

Method info.

abstract classmethod get_method_info() MethodInfo[source]

Get method info needed to initialize the datasets.

Returns:

Method info.

get_train_embedding(index: int) ndarray | None[source]

Get the embedding for the given image index.

Parameters:

index – Image index.

Returns:

Image embedding.

optimize_embedding(dataset: Dataset, *, embedding: ndarray | None = None) OptimizeEmbeddingOutput[source]

Optimize an embedding for a single image (passed as a dataset containing only that image).

Parameters:
  • dataset – A dataset with a single image.

  • embedding – Optional initial embedding.

abstract render(camera: Cameras, *, options: RenderOptions | None = None) Dict[str, ndarray][source]

Render a single image.

Parameters:
  • camera – Camera from which the scene is to be rendered.

  • options – Optional rendering options.

abstract save(path: str) None[source]

Save the model.

Parameters:

path – Path to save the model to.

abstract train_iteration(step: int) Dict[str, float][source]

Train one iteration.

Parameters:

step – Current step.
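
A sketch of the intended life cycle, following only the signatures above; method_cls stands for any concrete Method implementation and train_dataset for a loaded Dataset:

    from typing import Type

    from nerfbaselines import Dataset, Method

    def train(method_cls: Type[Method], train_dataset: Dataset, checkpoint_dir: str) -> Method:
        # get_method_info() is available before instantiation and tells the caller
        # which dataset features and camera models the method requires.
        info = method_cls.get_method_info()
        print("required features:", info.get("required_features"))

        method = method_cls(train_dataset=train_dataset)
        num_iterations = method.get_info()["num_iterations"]
        for step in range(num_iterations):
            metrics = method.train_iteration(step)  # e.g. {"loss": ...}; keys are method-specific
        method.save(checkpoint_dir)
        return method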

class nerfbaselines.MethodInfo[source]

Bases: TypedDict

method_id: Required[str]
required_features: FrozenSet[Literal['color', 'points3D_xyz', 'points3D_rgb', 'images_points3D_indices']]
supported_camera_models: FrozenSet[Literal['pinhole', 'opencv', 'opencv_fisheye', 'full_opencv']]
supported_outputs: Tuple[str | RenderOutputType, ...]
class nerfbaselines.MethodSpec[source]

Bases: TypedDict

apptainer: NotRequired[Any]
backends_order: List[Literal['conda', 'docker', 'apptainer', 'python']]
conda: NotRequired[Any]
docker: NotRequired[Any]
id: Required[str]
implementation_status: Dict[str, Literal['working', 'reproducing', 'not-working', 'working-not-reproducing']]
metadata: Dict[str, Any]
method_class: Required[str]
output_artifacts: Dict[str, OutputArtifact]
presets: Dict[str, Dict[str, Any]]
required_features: List[Literal['color', 'points3D_xyz', 'points3D_rgb', 'images_points3D_indices']]
supported_camera_models: List[Literal['pinhole', 'opencv', 'opencv_fisheye', 'full_opencv']]
supported_outputs: List[str | RenderOutputType]
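
For illustration, a minimal MethodSpec can be written as a dict literal; the "module:ClassName" string, the metadata, and the preset contents below are hypothetical placeholders (registration itself is shown under register() at the end of this page):

    from nerfbaselines import MethodSpec

    my_method_spec: MethodSpec = {
        "id": "my-method",
        # String resolving to a Method implementation (placeholder module path).
        "method_class": "my_package.my_method:MyMethod",
        "metadata": {"name": "My Method"},
        # Presets are named bundles of config overrides.
        "presets": {"large-scene": {"num_iterations": 100000}},
    }
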
class nerfbaselines.ModelInfo[source]

Bases: TypedDict

batch_size: int
eval_batch_size: int
hparams: Dict[str, Any]
loaded_checkpoint: str | None
loaded_step: int | None
method_id: Required[str]
num_iterations: Required[int]
required_features: FrozenSet[Literal['color', 'points3D_xyz', 'points3D_rgb', 'images_points3D_indices']]
supported_camera_models: FrozenSet
supported_outputs: Tuple[str | RenderOutputType, ...]
class nerfbaselines.OptimizeEmbeddingOutput[source]

Bases: TypedDict

embedding: Required[ndarray]
metrics: NotRequired[Dict[str, Sequence[float]]]
render_output: NotRequired[Dict[str, ndarray]]
class nerfbaselines.OutputArtifact[source]

Bases: TypedDict

class nerfbaselines.RenderOptions[source]

Bases: TypedDict

embedding: ndarray | None
output_type_dtypes: Dict[str, str]
outputs: Tuple[str, ...]
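
RenderOptions is passed to Method.render (and EvaluationProtocol.render) to request particular outputs; a minimal sketch, where the availability of a "depth" output depends on the method's supported_outputs:

    from nerfbaselines import RenderOptions

    options: RenderOptions = {
        "outputs": ("color", "depth"),             # which outputs to produce
        "output_type_dtypes": {"color": "uint8"},  # request quantized color
        "embedding": None,                         # optional appearance embedding
    }
    # predictions = method.render(camera, options=options)
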
class nerfbaselines.RenderOutputType[source]

Bases: TypedDict

name: Required[str]
type: Literal['color', 'depth']
class nerfbaselines.Trajectory[source]

Bases: TypedDict

appearances: NotRequired[List[TrajectoryFrameAppearance]]
camera_model: Literal['pinhole', 'opencv', 'opencv_fisheye', 'full_opencv']
fps: float
frames: List[TrajectoryFrame]
image_size: Tuple[int, int]
source: NotRequired[ImageSetInterpolationSource | KochanekBartelsInterpolationSource | None]
class nerfbaselines.TrajectoryFrame[source]

Bases: TypedDict

appearance_weights: NotRequired[ndarray]
intrinsics: ndarray
pose: ndarray
class nerfbaselines.TrajectoryFrameAppearance[source]

Bases: TypedDict

embedding: ndarray | None
embedding_train_index: int | None
class nerfbaselines.TrajectoryKeyframe[source]

Bases: TypedDict

appearance: NotRequired[TrajectoryFrameAppearance]
fov: float | None
pose: ndarray
transition_duration: NotRequired[float | None]
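
A sketch of a hand-built Trajectory with a single frame. The [3, 4] camera-to-world pose layout and the (width, height) order of image_size are assumptions consistent with the camera documentation above; real trajectories are usually exported from the viewer or interpolated from TrajectoryKeyframes:

    import numpy as np

    from nerfbaselines import Trajectory

    pose = np.eye(4, dtype=np.float32)[:3, :4]                       # camera-to-world, [3, 4] (assumed layout)
    intrinsics = np.array([500.0, 500.0, 400.0, 300.0], np.float32)  # fx, fy, cx, cy

    trajectory: Trajectory = {
        "camera_model": "pinhole",
        "image_size": (800, 600),  # (width, height) assumed
        "fps": 30.0,
        "frames": [{"pose": pose, "intrinsics": intrinsics}],
    }
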
class nerfbaselines.UnloadedDataset[source]

Bases: _IncompleteDataset

cameras: Cameras
image_paths: List[str]
image_paths_root: str
images: NotRequired[ndarray | List[ndarray] | None]
images_points3D_indices: List[ndarray] | None
metadata: Dict
points3D_rgb: ndarray | None
points3D_xyz: ndarray | None
sampling_mask_paths: List[str] | None
sampling_mask_paths_root: str | None
sampling_masks: ndarray | List[ndarray] | None
nerfbaselines.build_method_class(spec: MethodSpec, backend: Literal['conda', 'docker', 'apptainer', 'python'] | None = None)[source]

Build a method class from a method spec. It automatically selects the backend based on the method spec if none is provided.

Parameters:
  • spec – Method spec

  • backend – Backend name
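
A sketch of typical usage together with get_method_spec. The method ID is only an example, and the result is used here as a context manager yielding the method class, mirroring load_checkpoint below (an assumption about the return value, which is not spelled out in the signature above):

    from nerfbaselines import build_method_class, get_method_spec

    spec = get_method_spec("gaussian-splatting")  # example ID; see get_supported_methods()
    with build_method_class(spec, backend="python") as method_cls:
        info = method_cls.get_method_info()
        print(info["method_id"], sorted(info.get("supported_camera_models", ())))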

nerfbaselines.camera_model_from_int(i: int) Literal['pinhole', 'opencv', 'opencv_fisheye', 'full_opencv'][source]
nerfbaselines.camera_model_to_int(camera_model: Literal['pinhole', 'opencv', 'opencv_fisheye', 'full_opencv']) int[source]
nerfbaselines.get_dataset_loader_spec(id: str) DatasetLoaderSpec[source]

Get a dataset loader specification by registered dataset loader ID.

Parameters:

id – Dataset loader ID

Returns:

Dataset loader specification

nerfbaselines.get_dataset_spec(id: str) DatasetSpec[source]

Get a dataset specification by registered dataset ID.

Parameters:

id – Dataset ID

Returns:

Dataset specification

nerfbaselines.get_evaluation_protocol_spec(id: str) EvaluationProtocolSpec[source]

Get an evaluation protocol specification by registered evaluation protocol ID.

Parameters:

id – Evaluation protocol ID

Returns:

Evaluation protocol specification

nerfbaselines.get_logger_spec(id: str) LoggerSpec[source]

Get a logger specification by registered logger ID.

Parameters:

id – Logger ID

Returns:

Logger specification

nerfbaselines.get_method_spec(id: str) MethodSpec[source]

Get a method specification by registered method ID.

Parameters:

id – Method ID

Returns:

Method spec

nerfbaselines.get_supported_dataset_loaders() FrozenSet[str][source]

Get all supported dataset loaders. The loaders are sorted by priority.

Returns:

List of dataset loader IDs (sorted by priority)

nerfbaselines.get_supported_datasets() FrozenSet[str][source]

Get all supported datasets.

Returns:

Set of dataset IDs

nerfbaselines.get_supported_evaluation_protocols() FrozenSet[str][source]

Get all supported evaluation protocols.

Returns:

Set of evaluation protocol IDs

nerfbaselines.get_supported_loggers() FrozenSet[str][source]

Get all supported loggers.

Returns:

Set of logger IDs

nerfbaselines.get_supported_methods(backend_name: Literal['conda', 'docker', 'apptainer', 'python'] | None = None) FrozenSet[str][source]

Get all supported methods. Optionally, filter the methods that support a specific backend.

Parameters:

backend_name – Backend name

Returns:

Set of method IDs
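
The registry queries compose naturally, for example:

    from nerfbaselines import (
        get_supported_datasets,
        get_supported_loggers,
        get_supported_methods,
    )

    print(sorted(get_supported_methods(backend_name="docker")))  # methods runnable via Docker
    print(sorted(get_supported_datasets()))
    print(sorted(get_supported_loggers()))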

nerfbaselines.load_checkpoint(checkpoint: str, backend: Literal['conda', 'docker', 'apptainer', 'python'] | None = None) Generator[Method, None, None][source]

Utility function that opens the checkpoint directory, mounts it, starts the backend, builds the model class, and loads the checkpoint. The checkpoint can be a local path, a remote path, or a path inside a zip file. The function returns a context manager that yields the model instance.

Parameters:
  • checkpoint – Path to the checkpoint. Can be a local path or a remote path. Can also be a path inside a zip file.

  • backend – Backend name

Returns:

Context manager that yields the model instance
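
A short sketch of loading a checkpoint and inspecting the model (the checkpoint path is a placeholder; rendering would additionally need a Cameras instance):

    from nerfbaselines import load_checkpoint

    # The path is a placeholder; it may also be remote or point inside a zip file.
    with load_checkpoint("results/my-method/checkpoint", backend="docker") as method:
        info = method.get_info()
        print("loaded step:", info.get("loaded_step"))
        # predictions = method.render(camera)  # `camera`: a single-camera Cameras instance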

nerfbaselines.new_cameras(*, poses: ndarray, intrinsics: ndarray, camera_models: ndarray, image_sizes: ndarray, distortion_parameters: ndarray | None = None, nears_fars: ndarray | None = None, metadata: ndarray | None = None) Cameras[source]
nerfbaselines.new_dataset(*, cameras: Cameras, image_paths: Sequence[str], image_paths_root: str | None = None, images: ndarray | List[ndarray] | None = None, sampling_mask_paths: Sequence[str] | None = None, sampling_mask_paths_root: str | None = None, sampling_masks: ndarray | List[ndarray] | None = None, points3D_xyz: ndarray | None = None, points3D_rgb: ndarray | None = None, images_points3D_indices: Sequence[ndarray] | None = None, metadata: Dict | None = None) UnloadedDataset | Dataset[source]
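
A minimal sketch that builds a one-camera Cameras object and wraps it in a dataset; the [1, 3, 4] camera-to-world pose layout and the (width, height) order of image_sizes are assumptions consistent with the property documentation above:

    import numpy as np

    from nerfbaselines import camera_model_to_int, new_cameras, new_dataset

    cameras = new_cameras(
        poses=np.eye(4, dtype=np.float32)[None, :3, :4],                  # [1, 3, 4] camera-to-world
        intrinsics=np.array([[500.0, 500.0, 400.0, 300.0]], np.float32),  # fx, fy, cx, cy
        camera_models=np.array([camera_model_to_int("pinhole")], np.int32),
        image_sizes=np.array([[800, 600]], np.int32),
    )
    dataset = new_dataset(
        cameras=cameras,
        image_paths=["images/000000.png"],  # placeholder path
        metadata={"id": "my-dataset"},
    )
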
nerfbaselines.register(spec: MethodSpec | DatasetSpec | DatasetLoaderSpec | EvaluationProtocolSpec | LoggerSpec) None[source]

Register a method, dataset, logger, or evaluation protocol spec.

Parameters:

spec – Spec to register (MethodSpec, DatasetSpec, DatasetLoaderSpec, EvaluationProtocolSpec, LoggerSpec)
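
A sketch of registering a custom logger spec; the module path and class name are placeholders, and registration is typically performed at import time (e.g. from a plugin module):

    from nerfbaselines import LoggerSpec, register

    my_logger_spec: LoggerSpec = {
        "id": "my-logger",
        # String resolving to a Logger implementation (placeholder module path).
        "logger_class": "my_package.logging:MyLogger",
    }
    register(my_logger_spec)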