# Copyright 2022 Cerebras Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Config classes of T5 data Configs
"""
from dataclasses import dataclass
from typing import List, Optional, Union
from cerebras.modelzoo.common.registry import registry
from cerebras.modelzoo.config_manager.config_classes.base.base_config import (
required,
)
from cerebras.modelzoo.config_manager.config_classes.base.data_config import (
DataProcessorConfig,
)
from cerebras.modelzoo.data.common.config import HDF5IterableDataProcessorConfig
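
# The registry decorators below bind the `data_processor` name referenced in
# a YAML params file to these config classes; a hypothetical snippet:
#
#   train_input:
#       data_processor: "T5DynamicDataProcessor"
#       src_data_dir: "./t5_data/train"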


@registry.register_data_config("T5DynamicDataProcessor")
@dataclass
class T5DynamicDataProcessorConfig(DataProcessorConfig):
    src_data_dir: str = required
    "Path to the directory containing the source dataset."
    src_vocab_file: str = required
    "Path to the vocabulary file."
    src_max_sequence_length: int = required
    "Maximum sequence length for the encoder (source) inputs."
    tgt_max_sequence_length: int = required
    "Maximum sequence length for the decoder (target) inputs."
    shuffle_buffer: Optional[int] = None
    "Size of the buffer used to store samples before shuffling."
    do_lower: bool = False
    "If True, lowercase the input text."
    buckets: Optional[List[int]] = None
    "Optional bucket boundaries used to group sequences of similar length."
    dynamic_loss_weight: Optional[bool] = None
    pack_sequences: Optional[bool] = False
    "If True, concatenate multiple documents into one sequence to reduce padding."
    num_documents_to_concatenate: int = 128
    "Number of documents to pack together when `pack_sequences` is enabled."
    # Standard PyTorch DataLoader arguments.
    num_workers: int = 0
    drop_last: bool = True
    prefetch_factor: int = 10
    persistent_workers: bool = True
    # Special tokens and their corresponding pad ids.
    oov_token: str = "<unk>"
    sos_token: str = "<s>"
    eos_token: str = "</s>"
    pad_token: str = "<pad>"
    extra_ids: Union[int, List[int]] = 0
    "Number of sentinel tokens (T5 `<extra_id_*>` tokens) to add to the vocabulary."
    labels_pad_id: int = 0
    input_pad_id: int = 0
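
# A minimal usage sketch (hypothetical paths and sizes; in practice this
# config is populated from the YAML params file via the registry). The
# `batch_size` field is assumed to be inherited from DataProcessorConfig.
#
#   config = T5DynamicDataProcessorConfig(
#       batch_size=216,
#       src_data_dir="./t5_data/train",
#       src_vocab_file="./t5_data/vocab.txt",
#       src_max_sequence_length=512,
#       tgt_max_sequence_length=114,
#   )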


@registry.register_data_config("T5HDF5DataProcessor")
@dataclass
class T5HDF5DataProcessorConfig(HDF5IterableDataProcessorConfig):
    data_dir: Union[str, List[str]] = required
    "The path to the HDF5 files."
    num_workers: int = 0
    "Number of PyTorch DataLoader worker processes."
    drop_last: bool = True
    """
    Similar to the PyTorch drop_last setting, except that when set to
    True, samples that would have been dropped at the end of one epoch
    are yielded at the start of the next epoch, so that there is no
    data loss. This is necessary for a data ordering that is
    independent of the distributed setup being used.
    """
    use_vsl: bool = True
    """
    Flag to enable variable sequence length training. It requires the
    dataset to have two extra features: the `attention_span` of keys
    and the `position_ids` of tokens.
    """
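
# A minimal usage sketch (hypothetical paths; remaining fields keep their
# defaults, and fields such as `batch_size` are assumed to come from
# HDF5IterableDataProcessorConfig):
#
#   config = T5HDF5DataProcessorConfig(
#       batch_size=216,
#       data_dir=["./t5_hdf5/train_0", "./t5_hdf5/train_1"],
#       use_vsl=False,
#   )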