Skip to content

Commit ea7080b

Browse files
krammnic
authored and mori360 committed
Fix typo in multimodal_datasets.rst (pytorch#1787)
1 parent d79bf29 commit ea7080b

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

docs/source/basics/multimodal_datasets.rst

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ in the text, ``"<image>"`` for where to place the image tokens. This will get re
4545
from torchtune.models.llama3_2_vision import llama3_2_vision_transform
4646
from torchtune.datasets.multimodal import multimodal_chat_dataset
4747
48-
transform = Llama3VisionTransform(
48+
model_transform = Llama3VisionTransform(
4949
path="/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model",
5050
prompt_template="torchtune.data.QuestionAnswerTemplate",
5151
max_seq_len=8192,
@@ -64,7 +64,7 @@ in the text, ``"<image>"`` for where to place the image tokens. This will get re
6464
split="train",
6565
)
6666
tokenized_dict = ds[0]
67-
print(transform.decode(tokenized_dict["tokens"], skip_special_tokens=False))
67+
print(model_transform.decode(tokenized_dict["tokens"], skip_special_tokens=False))
6868
# '<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nQuestion:<|image|>What time is it on the clock?Answer:<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nIt is 10:00AM.<|eot_id|>'
6969
print(tokenized_dict["encoder_input"]["images"][0].shape) # (num_tiles, num_channels, tile_height, tile_width)
7070
# torch.Size([4, 3, 224, 224])
@@ -120,7 +120,7 @@ For most datasets, you will also need to specify the ``split`` and/or the subset
120120
from torchtune.models.llama3_2_vision import llama3_2_vision_transform
121121
from torchtune.datasets.multimodal import multimodal_chat_dataset
122122
123-
transform = llama3_2_vision_transform(
123+
model_transform = llama3_2_vision_transform(
124124
path="/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model",
125125
max_seq_len=8192,
126126
image_size=560,

0 commit comments

Comments (0)