Skip to content

Commit

Permalink
clean prints
Browse files Browse the repository at this point in the history
  • Loading branch information
mieskolainen committed Oct 21, 2024
1 parent dd1e63a commit d0e2f64
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 6 deletions.
14 changes: 9 additions & 5 deletions icenet/deep/autogradxgb.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@

from typing import Callable, Sequence, List, Tuple

# ------------------------------------------
from icenet import print
# ------------------------------------------

class XgboostObjective():
def __init__(self, loss_func: Callable[[Tensor, Tensor], Tensor], mode='train', loss_sign=1,
flatten_grad=False, hessian_mode='constant', hessian_const=1.0, device='cpu'):
Expand All @@ -23,9 +27,9 @@ def __init__(self, loss_func: Callable[[Tensor, Tensor], Tensor], mode='train',
self.flatten_grad = flatten_grad

if self.hessian_mode == 'constant':
print(__name__ + f': Using device: {self.device} | hessian_mode = {self.hessian_mode} | hessian_const = {self.hessian_const}')
print(f'Using device: {self.device} | hessian_mode = {self.hessian_mode} | hessian_const = {self.hessian_const}')
else:
print(__name__ + f': Using device: {self.device} | hessian_mode = {self.hessian_mode}')
print(f'Using device: {self.device} | hessian_mode = {self.hessian_mode}')

def __call__(self, preds: np.ndarray, targets: xgboost.DMatrix):

Expand All @@ -38,7 +42,7 @@ def __call__(self, preds: np.ndarray, targets: xgboost.DMatrix):
loss = self.loss_sign * self.loss_func(preds=preds_, targets=targets_, weights=weights_)
return 'custom', loss.detach().cpu().numpy()
else:
raise Exception('XgboostObjective: Unknown mode (set either "train" or "eval")')
raise Exception('Unknown mode (set either "train" or "eval")')

def torch_conversion(self, preds: np.ndarray, targets: xgboost.DMatrix):
""" Conversion from xgboost.Dmatrix object
Expand Down Expand Up @@ -74,14 +78,14 @@ def derivatives(self, loss: Tensor, preds: Tensor):
# Exact autograd
elif self.hessian_mode == 'exact':

print(__name__ + f'.derivatives: Computing Hessian diagonal with exact autograd ...')
print('Computing Hessian diagonal with exact autograd ...')

for i in tqdm(range(len(preds))): # Can be very slow
grad2_i = torch.autograd.grad(grad1[i], preds, retain_graph=True)[0]
grad2[i] = grad2_i[i]

else:
raise Exception(__name__ + f'.derivatives: Unknown "hessian_mode" {self.hessian_mode}')
raise Exception(f'Unknown "hessian_mode" {self.hessian_mode}')

grad1, grad2 = grad1.detach().cpu().numpy(), grad2.detach().cpu().numpy()

Expand Down
2 changes: 1 addition & 1 deletion icezee/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def load_helper(mcfiles, datafiles, maxevents, args):
for f in datafiles:
new_frame = copy.deepcopy(pd.read_parquet(f))
frames.append(new_frame)
print(__name__ + f'.load_helper: {f} | N = {len(new_frame)}', 'yellow')
print(f'{f} | N = {len(new_frame)}', 'yellow')
ids = list(new_frame.keys()); ids.sort()
print(ids)

Expand Down

0 comments on commit d0e2f64

Please sign in to comment.