cerebras.modelzoo.data.vision.segmentation.config.Hdf5BaseIterDataProcessorConfig#
- class cerebras.modelzoo.data.vision.segmentation.config.Hdf5BaseIterDataProcessorConfig(batch_size: int = <required>, shuffle: bool = True, shuffle_seed: int = 0, num_workers: int = 0, prefetch_factor: int = 10, persistent_workers: bool = True, use_worker_cache: bool = <required>, data_dir: Union[str, List[str]] = <factory>, num_classes: int = <required>, image_shape: List[int] = <factory>, loss: str = <required>, normalize_data_method: Optional[str] = None, augment_data: bool = True, shuffle_buffer: Optional[int] = None, drop_last: bool = True, mixed_precision: Optional[bool] = None)[source]#
- use_worker_cache: bool = <required>#
- data_dir: Union[str, List[str]]#
- num_classes: int = <required>#
- image_shape: List[int]#
- loss: str = <required>#
- normalize_data_method: Optional[str] = None#
- augment_data: bool = True#
- num_workers: int = 0#
The number of PyTorch processes used in the dataloader
- shuffle_buffer: Optional[int] = None#
- drop_last: bool = True#
- prefetch_factor: int = 10#
The number of batches to prefetch in the dataloader
- persistent_workers: bool = True#
Whether or not to keep workers persistent between epochs
- mixed_precision: Optional[bool] = None#
- batch_size: int = <required>#
Batch size to be used
- shuffle: bool = True#
Whether or not to shuffle the dataset
- shuffle_seed: int = 0#
Seed used for deterministic shuffling