
Commit

tune param
mieskolainen committed Jul 28, 2024
1 parent 37eb044 commit c8e3bfb
Showing 1 changed file with 10 additions and 10 deletions.
configs/zee/models.yml (10 additions, 10 deletions)
@@ -233,11 +233,11 @@ lzmlp0: &LZMLP
 #lossfunc: 'binary_Lq_entropy'
 #q: 0.8 # Lq exponent (q < 1 -> high density vals emphasized, q > 1 then low emphasized)

-SWD_beta: 5.0e-2 # Sliced Wasserstein [reweighting regularization]
+SWD_beta: 1.0e-3 # Sliced Wasserstein [reweighting regularization]
 SWD_p: 1 # p-norm (1,2,..), 1 perhaps more robust
-SWD_num_slices: 10000 # Number of MC projections (higher the better)
+SWD_num_slices: 1000 # Number of MC projections (higher the better)
 SWD_mode: 'SWD' # 'SWD' (basic)
-SWD_norm_weights: False # Normalization enforced
+SWD_norm_weights: True # Normalization enforced

 lipschitz_beta: 5.0e-5 # lipschitz regularization (use with 'lzmlp')
 #logit_L1_beta: 1.0e-2 # logit norm reg. ~ beta * torch.sum(|logits|)
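
The SWD_* keys above (retuned identically in all three model blocks, LZMLP, FASTKAN and DMLP) control a sliced Wasserstein distance term used as a reweighting regularizer. As an illustration only, not the repository's actual implementation, a minimal Monte Carlo estimator of the sliced p-Wasserstein distance between two equal-size, unweighted samples could look like the PyTorch sketch below; p corresponds to SWD_p and num_slices to SWD_num_slices, while per-event weights and the SWD_norm_weights normalization are left out for brevity.

import torch

def sliced_wasserstein(x, y, p=1, num_slices=1000):
    # x, y: (N, D) feature tensors with the same number of rows (unweighted samples assumed)
    D = x.shape[1]
    # Random unit directions for the Monte Carlo projections, shape (num_slices, D)
    theta = torch.randn(num_slices, D, device=x.device)
    theta = theta / theta.norm(dim=1, keepdim=True)
    # Project both samples onto each direction: (N, num_slices)
    x_proj = x @ theta.T
    y_proj = y @ theta.T
    # 1D optimal transport reduces to matching the sorted projections
    x_sorted, _ = torch.sort(x_proj, dim=0)
    y_sorted, _ = torch.sort(y_proj, dim=0)
    # Average |difference|^p over events and slices, then take the p-th root
    return torch.mean(torch.abs(x_sorted - y_sorted) ** p) ** (1.0 / p)

With this estimator, a larger num_slices reduces the Monte Carlo variance of the projection average (hence the "higher the better" comment), at the cost of more compute per training step.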
@@ -322,11 +322,11 @@ fastkan0: &FASTKAN
 #lossfunc: 'binary_Lq_entropy' # binary_cross_entropy, cross_entropy, focal_entropy, logit_norm_cross_entropy
 #q: 0.8 # Lq exponent (q < 1 -> high density vals emphasized, q > 1 then low emphasized)

-SWD_beta: 5.0e-2 # Sliced Wasserstein [reweighting regularization]
+SWD_beta: 1.0e-3 # Sliced Wasserstein [reweighting regularization]
 SWD_p: 1 # p-norm (1,2,..), 1 perhaps more robust
-SWD_num_slices: 10000 # Number of MC projections (higher the better)
+SWD_num_slices: 1000 # Number of MC projections (higher the better)
 SWD_mode: 'SWD' # 'SWD' (basic)
-SWD_norm_weights: False # Normalization enforced
+SWD_norm_weights: True # Normalization enforced

 #lipshitz_beta: 1.0e-4 # Lipshitz regularization (use with 'lzmlp')
 #logit_L1_beta: 1.0e-2 # logit norm reg. ~ beta * torch.sum(|logits|)
@@ -415,12 +415,12 @@ dmlp0: &DMLP
 #lossfunc: 'binary_Lq_entropy'
 #q: 0.8 # Lq exponent (q < 1 -> high density vals emphasized, q > 1 then low emphasized)

-SWD_beta: 5.0e-2 # Sliced Wasserstein [reweighting regularization]
+SWD_beta: 1.0e-3 # Sliced Wasserstein [reweighting regularization]
 SWD_p: 1 # p-norm (1,2,..), 1 perhaps more robust
-SWD_num_slices: 10000 # Number of MC projections (higher the better)
+SWD_num_slices: 1000 # Number of MC projections (higher the better)
 SWD_mode: 'SWD' # 'SWD' (basic)
-SWD_norm_weights: False # Normalization enforced
+SWD_norm_weights: True # Normalization enforced

 #logit_L1_beta: 1.0e-2 # logit norm reg. ~ lambda * torch.sum(|logits|)
 logit_L2_beta: 5.0e-3 # logit norm reg. ~ lambda * torch.sum(logits**2)
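
For orientation only: the regularization strengths above would plausibly enter the training objective as additive penalties on top of the classification loss, in line with the YAML comments. The sketch below is a hypothetical composition using the new config values as defaults; the actual combination lives in the repository's training code.

import torch

def total_loss(bce_loss, swd_value, logits, swd_beta=1.0e-3, logit_l2_beta=5.0e-3):
    # bce_loss:   primary classification loss (scalar tensor)
    # swd_value:  sliced Wasserstein distance term (see sketch above)
    # logits:     raw network outputs, penalized with an L2 norm term
    reg_swd   = swd_beta * swd_value                      # scaled by SWD_beta
    reg_logit = logit_l2_beta * torch.sum(logits ** 2)    # scaled by logit_L2_beta
    return bce_loss + reg_swd + reg_logit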
