Generalize HF datasets to a collection of HF datasets via datasets, adds support for custom chat HF datasets (#1088), and fixes (#1087)

Chime Ogbuji 2024-11-03 19:11:54 -05:00 committed by Awni Hannun
parent 3496cbea46
commit 14a75f3f03


@@ -34,14 +34,15 @@ class ChatDataset:
     https://platform.openai.com/docs/guides/fine-tuning/example-format
     """
 
-    def __init__(self, data: List[Dict[str, str]], tokenizer: PreTrainedTokenizer):
+    def __init__(self, data: List[Dict[str, str]], tokenizer: PreTrainedTokenizer, chat_key: str = "messages"):
         self._data = [
             tokenizer.apply_chat_template(
-                d["messages"],
+                d[chat_key],
                 tools=d.get("tools", None),
             )
             for d in data
         ]
+        self._chat_key = chat_key
 
     def __getitem__(self, idx: int):
         return self._data[idx]
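A minimal sketch of the new chat_key parameter in use; the model id and row layout below are illustrative assumptions, not part of the commit:

from transformers import AutoTokenizer

# Hypothetical rows that keep their conversation under "dialogue"
# rather than the default "messages" key.
data = [
    {
        "dialogue": [
            {"role": "user", "content": "What is MLX?"},
            {"role": "assistant", "content": "An array framework for Apple silicon."},
        ]
    }
]
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")  # any chat-templated tokenizer
chat_ds = ChatDataset(data, tokenizer, chat_key="dialogue")
print(chat_ds[0])  # token ids produced by tokenizer.apply_chat_template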
@@ -84,6 +85,29 @@ class CompletionsDataset:
         return len(self._data)
 
 
+class CompletionsDatasetCollection:
+    def __init__(self, data: List[Union[ChatDataset, CompletionsDataset]]):
+        self.collection = data
+
+    def __getitem__(self, idx: int):
+        # Walk the member datasets in order, shifting the index down by
+        # each dataset's length until it falls inside the current one.
+        iterator = iter(self.collection)
+        item = next(iterator)
+        curr_idx = idx
+        while True:
+            try:
+                if curr_idx < len(item):
+                    return item[curr_idx]
+                else:
+                    curr_idx -= len(item)
+                    item = next(iterator)
+            except StopIteration:
+                raise IndexError(idx)
+
+    def __len__(self):
+        return sum(map(len, self.collection))
+
+
 def create_dataset(
     data,
     tokenizer: PreTrainedTokenizer,
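To make the index arithmetic concrete, here is a small sketch; plain lists stand in for ChatDataset/CompletionsDataset instances, since the collection only needs len() and integer indexing from its members:

collection = CompletionsDatasetCollection([[10, 11, 12], [20, 21]])

assert len(collection) == 5   # 3 + 2
assert collection[0] == 10    # falls inside the first member
assert collection[2] == 12    # last element of the first member
assert collection[3] == 20    # 3 - len(first) = 0 into the second member
# collection[5] raises IndexError(5): every member has been exhausted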
@@ -157,14 +181,14 @@ def load_hf_dataset(
 def load_custom_hf_dataset(args, tokenizer: PreTrainedTokenizer):
     import datasets
 
-    hf_args = args.hf_dataset
-    dataset_name = hf_args["name"]
-    print(f"Loading Hugging Face dataset {dataset_name}.")
-    text_feature = hf_args.get("text_feature")
-    prompt_feature = hf_args.get("prompt_feature")
-    completion_feature = hf_args.get("completion_feature")
-
-    def create_hf_dataset(split: str = None):
+    def create_hf_dataset(
+        dataset_name,
+        text_feature,
+        prompt_feature,
+        completion_feature,
+        chat_feature,
+        split,
+    ):
         ds = datasets.load_dataset(
             dataset_name,
             split=split,
@@ -172,28 +196,62 @@ def load_custom_hf_dataset(args, tokenizer: PreTrainedTokenizer):
         )
         if prompt_feature and completion_feature:
             return CompletionsDataset(ds, tokenizer, prompt_feature, completion_feature)
+        elif chat_feature:
+            return ChatDataset(ds, tokenizer, chat_key=chat_feature)
         elif text_feature:
             return Dataset(ds, tokenizer, text_key=text_feature)
         else:
             raise ValueError(
-                "Specify either a prompt and completion feature or a text "
-                "feature for the Hugging Face dataset."
+                "Specify either a prompt and completion feature, a chat feature,"
+                " or a text feature for the Hugging Face dataset."
             )
 
-    if args.train:
-        train_split = hf_args.get("train_split", "train[:80%]")
-        valid_split = hf_args.get("valid_split", "train[-10%:]")
-        train = create_hf_dataset(split=train_split)
-        valid = create_hf_dataset(split=valid_split)
-    else:
-        train, valid = [], []
-    if args.test:
-        test = create_hf_dataset(split=hf_args.get("test_split"))
-    else:
-        test = []
-
-    return train, valid, test
+    def get_splits(hf_args, ds_name):
+        text_f = hf_args.get("text_feature", None)
+        prompt_f = hf_args.get("prompt_feature", None)
+        completion_f = hf_args.get("completion_feature", None)
+        chat_f = hf_args.get("chat_feature", None)
+        if args.train:
+            train_split = hf_args.get("train_split", "train[:80%]")
+            valid_split = hf_args.get("valid_split", "train[-10%:]")
+            train = create_hf_dataset(
+                ds_name, text_f, prompt_f, completion_f, chat_f, split=train_split
+            )
+            valid = create_hf_dataset(
+                ds_name, text_f, prompt_f, completion_f, chat_f, split=valid_split
+            )
+        else:
+            train, valid = [], []
+        if args.test:
+            test_split = hf_args.get("test_split")
+            test = create_hf_dataset(
+                ds_name, text_f, prompt_f, completion_f, chat_f, split=test_split
+            )
+        else:
+            test = []
+        return train, valid, test
+
+    if args.datasets:
+        dataset_collection = args.hf_datasets
+    else:
+        dataset_collection = [{"hf_dataset": args.hf_dataset}]
+
+    # Collect a (train, valid, test) tuple per dataset; the accumulator is
+    # not named `datasets` to avoid shadowing the imported module above.
+    all_splits = []
+    for ds in dataset_collection:
+        hf_args = ds["hf_dataset"]
+        dataset_name = hf_args["name"]
+        print(f"Loading Hugging Face dataset {dataset_name}.")
+        all_splits.append(get_splits(hf_args, dataset_name))
+    if len(all_splits) == 1:
+        return all_splits[0]
+
+    # Otherwise concatenate the per-dataset splits
+    train, valid, test = zip(*all_splits)
+    return tuple(map(CompletionsDatasetCollection, (train, valid, test)))
 
 
 def load_dataset(args, tokenizer: PreTrainedTokenizer):
     if getattr(args, "hf_dataset", False):
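For reference, the loop above expects args.hf_datasets to be a list of entries, each nesting its arguments under an "hf_dataset" key. A hypothetical value built only from the keys the code reads (the dataset names are placeholders):

hf_datasets = [
    {
        "hf_dataset": {
            "name": "org/chat-dataset",          # placeholder dataset id
            "chat_feature": "messages",          # dispatches to ChatDataset
            "train_split": "train[:80%]",
            "valid_split": "train[-10%:]",
        }
    },
    {
        "hf_dataset": {
            "name": "org/completions-dataset",   # placeholder dataset id
            "prompt_feature": "prompt",          # with completion_feature,
            "completion_feature": "completion",  # dispatches to CompletionsDataset
        }
    },
]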