
Commit

pydantic 2
lucidrains committed Jul 15, 2023
1 parent 00e07b7 commit 40843bc
Showing 4 changed files with 19 additions and 14 deletions.
28 changes: 16 additions & 12 deletions dalle2_pytorch/train_configs.py
@@ -1,6 +1,6 @@
 import json
 from torchvision import transforms as T
-from pydantic import BaseModel, validator, root_validator
+from pydantic import BaseModel, validator, model_validator
 from typing import List, Optional, Union, Tuple, Dict, Any, TypeVar
 
 from x_clip import CLIP as XCLIP
@@ -38,12 +38,12 @@ class TrainSplitConfig(BaseModel):
     val: float = 0.15
     test: float = 0.1
 
-    @root_validator
-    def validate_all(cls, fields):
-        actual_sum = sum([*fields.values()])
+    @model_validator(mode = 'after')
+    def validate_all(self, m):
+        actual_sum = sum([*dict(self).values()])
         if actual_sum != 1.:
-            raise ValueError(f'{fields.keys()} must sum to 1.0. Found: {actual_sum}')
-        return fields
+            raise ValueError(f'{dict(self).keys()} must sum to 1.0. Found: {actual_sum}')
+        return self
 
 class TrackerLogConfig(BaseModel):
     log_type: str = 'console'
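Note: the hunk above is the core pydantic 1 to pydantic 2 migration of the split check. For reference only (not part of the commit), a minimal standalone sketch of the same pattern, with hypothetical field values; in mode = 'after' the validator receives the fully constructed instance, reads fields from self, and must return self:

# Hypothetical standalone sketch of a pydantic 2 'after' model validator.
from pydantic import BaseModel, model_validator

class SplitConfig(BaseModel):
    train: float = 0.75
    val: float = 0.15
    test: float = 0.1

    @model_validator(mode = 'after')
    def validate_all(self):
        # runs after construction, so fields are read from the instance
        actual_sum = self.train + self.val + self.test
        if actual_sum != 1.:
            raise ValueError(f'splits must sum to 1.0, found {actual_sum}')
        return self

SplitConfig(train = 0.8, val = 0.1, test = 0.1)    # passes
# SplitConfig(train = 0.5, val = 0.1, test = 0.1)  # raises ValidationError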
@@ -59,6 +59,7 @@ def create(self, data_path: str):
         kwargs = self.dict()
         return create_logger(self.log_type, data_path, **kwargs)
 
+
 class TrackerLoadConfig(BaseModel):
     load_from: Optional[str] = None
     only_auto_resume: bool = False  # Only attempt to load if the logger is auto-resuming
@@ -277,9 +278,9 @@ class Config:
         extra = "allow"
 
 class DecoderDataConfig(BaseModel):
-    webdataset_base_url: str  # path to a webdataset with jpg images
-    img_embeddings_url: Optional[str]  # path to .npy files with embeddings
-    text_embeddings_url: Optional[str]  # path to .npy files with embeddings
+    webdataset_base_url: str                   # path to a webdataset with jpg images
+    img_embeddings_url: Optional[str] = None   # path to .npy files with embeddings
+    text_embeddings_url: Optional[str] = None  # path to .npy files with embeddings
     num_workers: int = 4
     batch_size: int = 64
     start_shard: int = 0
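Note: the explicit `= None` defaults above reflect a pydantic 2 behaviour change: an Optional[...] annotation no longer implies a default, so a field without one becomes required. A small sketch of that behaviour (hypothetical model and URL, not part of the commit):

# Sketch of why the '= None' defaults are needed under pydantic 2.
from typing import Optional
from pydantic import BaseModel

class DataConfig(BaseModel):
    webdataset_base_url: str                   # still required
    img_embeddings_url: Optional[str] = None   # optional only because of the explicit default
    text_embeddings_url: Optional[str] = None

DataConfig(webdataset_base_url = 's3://bucket/shard-{000..999}.tar')  # valid without embedding urls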
@@ -346,11 +347,14 @@ class TrainDecoderConfig(BaseModel):
     def from_json_path(cls, json_path):
         with open(json_path) as f:
             config = json.load(f)
+        print(config)
         return cls(**config)
 
-    @root_validator
-    def check_has_embeddings(cls, values):
+    @model_validator(mode = 'after')
+    def check_has_embeddings(self, m):
         # Makes sure that enough information is provided to get the embeddings specified for training
+        values = dict(self)
+
         data_config, decoder_config = values.get('data'), values.get('decoder')
 
         if not exists(data_config) or not exists(decoder_config):
@@ -375,4 +379,4 @@ def check_has_embeddings(cls, values):
         if text_emb_url:
             assert using_text_embeddings, "Text embeddings are being loaded, but text embeddings are not being conditioned on. This will slow down the dataloader for no reason."
 
-        return values
+        return m
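Note: check_has_embeddings now runs as an 'after' model validator, so it operates on the constructed instance and reads the sub-configs through dict(self). A self-contained sketch of the same cross-field pattern, with hypothetical field names rather than the repository's actual config schema:

# Hypothetical sketch of cross-field validation over nested configs in pydantic 2.
from typing import Optional
from pydantic import BaseModel, model_validator

class DataConfig(BaseModel):
    img_embeddings_url: Optional[str] = None

class DecoderConfig(BaseModel):
    condition_on_img_embeddings: bool = False

class TrainConfig(BaseModel):
    data: DataConfig
    decoder: DecoderConfig

    @model_validator(mode = 'after')
    def check_has_embeddings(self):
        # after-validators see fully constructed sub-models, so related fields can be checked together
        if self.decoder.condition_on_img_embeddings:
            assert self.data.img_embeddings_url is not None, 'conditioning on image embeddings requires an embeddings url'
        return self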
2 changes: 1 addition & 1 deletion dalle2_pytorch/version.py
@@ -1 +1 @@
-__version__ = '1.14.2'
+__version__ = '1.15.1'
2 changes: 1 addition & 1 deletion setup.py
@@ -36,7 +36,7 @@
     'numpy',
     'packaging',
     'pillow',
-    'pydantic',
+    'pydantic>=2',
     'pytorch-warmup',
     'resize-right>=0.0.2',
     'rotary-embedding-torch',
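Note: with the dependency pinned to pydantic>=2, environments still on pydantic 1.x will no longer satisfy the requirement. A quick, optional sanity check (a sketch, not part of the commit):

# Optional check that the installed pydantic satisfies the new pin.
import pydantic
assert pydantic.VERSION.startswith('2.'), f'pydantic>=2 required, found {pydantic.VERSION}'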
1 change: 1 addition & 0 deletions train_decoder.py
@@ -577,6 +577,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
     shards_per_process = len(all_shards) // world_size
     assert shards_per_process > 0, "Not enough shards to split evenly"
     my_shards = all_shards[rank * shards_per_process: (rank + 1) * shards_per_process]
+
     dataloaders = create_dataloaders (
         available_shards=my_shards,
         img_preproc = config.data.img_preproc,
