From 64e4eb5ee7de9515858d71df009c682e1b1d7063 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Wed, 15 May 2019 11:59:53 +0200 Subject: [PATCH 01/25] run 2to3 --- cubical/data_handler/MBTiggerSim.py | 2 +- cubical/data_handler/TiggerSourceProvider.py | 2 +- cubical/data_handler/__init__.py | 6 +- cubical/data_handler/ms_data_handler.py | 270 +++++++++--------- cubical/data_handler/ms_tile.py | 142 ++++----- cubical/database/iface_database.py | 3 +- cubical/database/parameter.py | 58 ++-- cubical/database/pickled_db.py | 70 ++--- cubical/flagging.py | 52 ++-- cubical/kernels/__init__.py | 2 +- cubical/machines/abstract_machine.py | 34 ++- cubical/machines/complex_2x2_machine.py | 2 +- cubical/machines/complex_W_2x2_machine.py | 2 +- cubical/machines/ifr_gain_machine.py | 26 +- cubical/machines/interval_gain_machine.py | 14 +- cubical/machines/jones_chain_machine.py | 20 +- .../machines/jones_chain_robust_machine.py | 20 +- cubical/machines/machine_types.py | 8 +- cubical/machines/parallactic_machine.py | 4 +- cubical/machines/phase_diag_machine.py | 2 +- cubical/machines/slope_machine.py | 14 +- cubical/madmax/flagger.py | 67 +++-- cubical/madmax/plots.py | 42 +-- cubical/main.py | 100 +++---- cubical/param_db.py | 18 +- cubical/plots/__init__.py | 2 +- cubical/plots/ifrgains.py | 12 +- cubical/plots/stats.py | 10 +- cubical/solver.py | 16 +- cubical/statistics.py | 26 +- cubical/tools/ClassPrint.py | 12 +- cubical/tools/ModColor.py | 14 +- cubical/tools/NpShared.py | 56 ++-- cubical/tools/dynoptparse.py | 40 +-- cubical/tools/logger.py | 6 +- cubical/tools/parsets.py | 12 +- cubical/tools/shared_dict.py | 28 +- cubical/tools/shm_utils.py | 10 +- cubical/workers.py | 74 ++--- test/benchmark/kernel_timings.py | 30 +- test/d147_test.py | 4 +- test/kernels_test.py | 2 +- 42 files changed, 665 insertions(+), 669 deletions(-) diff --git a/cubical/data_handler/MBTiggerSim.py b/cubical/data_handler/MBTiggerSim.py index 12bfdf01..2fbb7d8e 100644 --- a/cubical/data_handler/MBTiggerSim.py +++ b/cubical/data_handler/MBTiggerSim.py @@ -234,7 +234,7 @@ def model_vis(self, context): else: sel = slice(None) - for ddid_ind in xrange(self._nddid): + for ddid_ind in range(self._nddid): offset = ddid_ind*rows_per_ddid lr = lower + offset ur = upper + offset diff --git a/cubical/data_handler/TiggerSourceProvider.py b/cubical/data_handler/TiggerSourceProvider.py index 0c8202e4..d92663bf 100644 --- a/cubical/data_handler/TiggerSourceProvider.py +++ b/cubical/data_handler/TiggerSourceProvider.py @@ -44,7 +44,7 @@ def __init__(self, lsm, phase_center, dde_tag='dE'): self._freqs = None self._clusters = cluster_sources(self._sm, dde_tag) - self._cluster_keys = self._clusters.keys() + self._cluster_keys = list(self._clusters.keys()) self._nclus = len(self._cluster_keys) self._target_key = 0 diff --git a/cubical/data_handler/__init__.py b/cubical/data_handler/__init__.py index ce23c538..2bd6d286 100644 --- a/cubical/data_handler/__init__.py +++ b/cubical/data_handler/__init__.py @@ -17,7 +17,7 @@ def uniquify(values): uniq = np.array(sorted(set(values))) rmap = {x: i for i, x in enumerate(uniq)} # apply this map to the time column to construct a timestamp column - indices = np.fromiter(map(rmap.__getitem__, values), int) + indices = np.fromiter(list(map(rmap.__getitem__, values)), int) return indices, uniq, rmap # Try to import montblanc: if not successful, remember error for later. 
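[review note on the cubical/data_handler/__init__.py hunk] 2to3 wraps map() in list() wherever it cannot prove the result is only iterated once, but np.fromiter() exists precisely to consume a lazy iterable, so the wrapper in uniquify() only buys an extra intermediate copy. A standalone sketch of the same pattern (hypothetical values, not part of the patch):

    import numpy as np

    values = [30.0, 10.0, 20.0, 10.0]
    uniq = np.array(sorted(set(values)))
    rmap = {x: i for i, x in enumerate(uniq)}
    # np.fromiter accepts any iterable, so the list() wrapper can be dropped:
    indices = np.fromiter(map(rmap.__getitem__, values), int)
    assert (uniq[indices] == np.array(values)).all()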
@@ -30,8 +30,8 @@ def import_montblanc():
         import montblanc
         # all of these potentially fall over if Montblanc is the wrong version or something, so moving them here
         # for now
-        from MBTiggerSim import simulate, MSSourceProvider, ColumnSinkProvider
-        from TiggerSourceProvider import TiggerSourceProvider
+        from .MBTiggerSim import simulate, MSSourceProvider, ColumnSinkProvider
+        from .TiggerSourceProvider import TiggerSourceProvider
         from montblanc.impl.rime.tensorflow.sources import CachedSourceProvider, FitsBeamSourceProvider
         return montblanc, None
     except:
diff --git a/cubical/data_handler/ms_data_handler.py b/cubical/data_handler/ms_data_handler.py
index 566ea920..ec4ce2f8 100644
--- a/cubical/data_handler/ms_data_handler.py
+++ b/cubical/data_handler/ms_data_handler.py
@@ -5,7 +5,7 @@
 import numpy as np
 from collections import OrderedDict
 import pyrap.tables as pt
-import cPickle
+import pickle
 import re
 import traceback
 import math
@@ -58,11 +58,11 @@ def _parse_slice(arg, what="slice"):
     m1 = re.match("(\d*)~(\d*)(:(\d+))?$", arg)
     m2 = re.match("(\d*):(\d*)(:(\d+))?$", arg)
     if m1:
-        i0, i1, i2 = [ int(x) if x else None for x in m1.group(1),m1.group(2),m1.group(4) ]
+        i0, i1, i2 = [ int(x) if x else None for x in (m1.group(1),m1.group(2),m1.group(4)) ]
         if i1 is not None:
             i1 += 1
     elif m2:
-        i0, i1, i2 = [ int(x) if x else None for x in m2.group(1),m2.group(2),m2.group(4) ]
+        i0, i1, i2 = [ int(x) if x else None for x in (m2.group(1),m2.group(2),m2.group(4)) ]
     else:
         raise ValueError("can't parse '{}' as a {}".format(arg, what))
     return slice(i0,i1,i2)
@@ -90,7 +90,7 @@ def _parse_range(arg, nmax):
             If the range cannot be parsed.
     """
-    fullrange = range(nmax)
+    fullrange = list(range(nmax))
 
     if arg is None:
         return fullrange
@@ -107,7 +107,7 @@ def _parse_range(arg, nmax):
     if re.match("\d+$", arg):
         return [ int(arg) ]
     elif "," in arg:
-        return map(int,','.split(arg))
+        return list(map(int, arg.split(',')))  # NB: also fixes pre-existing bug -- ','.split(arg) split the wrong string
 
     return fullrange[_parse_slice(arg, "range or slice")]
 
@@ -124,7 +124,7 @@ def _parse_bin(binspec, units, default_int=None, default_float=None, kind='bin')
     elif type(binspec) is int:
         return binspec, default_float
     elif type(binspec) is str:
-        for unit, multiplier in units.items():
+        for unit, multiplier in list(units.items()):
             if binspec.endswith(unit) and len(binspec) > len(unit):
                 xval = binspec[:-len(unit)]
                 if xval[-1] in _prefixes:
@@ -229,7 +229,7 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column
 
         self.fid = fid if fid is not None else 0
 
-        print>>log, ModColor.Str("reading MS %s"%self.ms_name, col="green")
+        print(ModColor.Str("reading MS %s"%self.ms_name, col="green"), file=log)
 
         self.ms = pt.table(self.ms_name, readonly=False, ack=False)
         self.data = None
@@ -278,9 +278,9 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column
         self.metadata.antenna_name_prefix = antnames[0][:prefix_length]
         self.metadata.baseline_name = { (p,q): "{}-{}".format(antnames[p], antnames_short[q])
-                                        for p in xrange(self.nants) for q in xrange(p+1, self.nants)}
+                                        for p in range(self.nants) for q in range(p+1, self.nants)}
         self.metadata.baseline_length = { (p,q): math.sqrt(((antpos[p]-antpos[q])**2).sum())
-                                        for p in xrange(self.nants) for q in xrange(p+1, self.nants)}
+                                        for p in range(self.nants) for q in range(p+1, self.nants)}
 
         if do_load_CASA_kwtables:
             # antenna fields to be used when writing gain tables
@@ -337,13 +337,13 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column
             raise TypeError("unsupported POLARIZATION_TYPE {}.
Terminating.".format(self._poltype)) # print some info on MS layout - print>>log," detected {} ({}) feeds".format(self._poltype, self.feeds) - print>>log," fields are "+", ".join(["{}{}: {}".format('*' if i==fid else "",i,name) for i, name in enumerate(_fldtab.getcol("NAME"))]) + print(" detected {} ({}) feeds".format(self._poltype, self.feeds), file=log) + print(" fields are "+", ".join(["{}{}: {}".format('*' if i==fid else "",i,name) for i, name in enumerate(_fldtab.getcol("NAME"))]), file=log) # get list of channel frequencies (this may have varying sizes) - self._spw_chanfreqs = [ _spwtab.getcell("CHAN_FREQ", i) for i in xrange(_spwtab.nrows()) ] - self._spw_chanwidth = [ _spwtab.getcell("CHAN_WIDTH", i) for i in xrange(_spwtab.nrows()) ] - print>>log," MS contains {} spectral windows".format(len(self._spw_chanfreqs)) + self._spw_chanfreqs = [ _spwtab.getcell("CHAN_FREQ", i) for i in range(_spwtab.nrows()) ] + self._spw_chanwidth = [ _spwtab.getcell("CHAN_WIDTH", i) for i in range(_spwtab.nrows()) ] + print(" MS contains {} spectral windows".format(len(self._spw_chanfreqs)), file=log) # figure out DDID range self._num_total_ddids = _ddesctab.nrows() @@ -355,7 +355,7 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column self._channel_slice = _parse_slice(channels) # form up blc/trc/incr arguments for getcolslice() and putcolslice() if self._channel_slice != slice(None): - print>> log, " applying a channel selection of {}".format(channels) + print(" applying a channel selection of {}".format(channels), file=log) chan0 = self._channel_slice.start if self._channel_slice.start is not None else 0 chan1 = self._channel_slice.stop - 1 if self._channel_slice.stop is not None else -1 self._ms_blc = (chan0, 0) @@ -373,14 +373,14 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column # now compute binning/chunking for each DDID chunk_chans, chunk_hz = _parse_freqspec(chunk_freq, 1<<31, 1e+99) - print>>log," max freq chunk size is {} channels and/or {} MHz".format( + print(" max freq chunk size is {} channels and/or {} MHz".format( '--' if chunk_chans == 1<<31 else chunk_chans, - '--' if chunk_hz == 1e+99 else chunk_hz*1e-6) + '--' if chunk_hz == 1e+99 else chunk_hz*1e-6), file=log) rebin_chans, rebin_hz = _parse_freqspec(rebin_freq, 1, None) if rebin_hz is not None: - print>>log, " rebinning into {} MHz channels".format(rebin_hz*1e-6) + print(" rebinning into {} MHz channels".format(rebin_hz*1e-6), file=log) elif rebin_chans > 1: - print>>log," rebinning by {} channels".format(rebin_chans) + print(" rebinning by {} channels".format(rebin_chans), file=log) # per DDID: self.rebin_chan_maps = {} # map from raw channel to rebinned channel @@ -440,31 +440,31 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column nchan = chan+1 chanfreqs = self.chanfreqs[ddid] = np.empty(nchan, float) chanwidth = self.chanwidth[ddid] = np.empty(nchan, float) - for chan in xrange(nchan): + for chan in range(nchan): fmin, fmax = chan_edges.get(chan) chanfreqs[chan] = (fmin+fmax)/2 chanwidth[chan] = (fmax-fmin) self.freqchunks[ddid] = freqchunks = [rebin_chan_map[chan0] for chan0 in freqchunk_chan0] - print>>log(0)," DDID {}: {}/{} selected channels will be rebinned into {} channels".format( - ddid, nchan0, nchan0_orig, nchan) - print>>log(1)," rebinned channel freqs (MHz): {}".format( - " ".join([str(x*1e-6) for x in chanfreqs])) - print>>log(1)," rebinned channel widths (MHz): {}".format( - " ".join([str(x*1e-6) for x in chanwidth])) 
+ print(" DDID {}: {}/{} selected channels will be rebinned into {} channels".format( + ddid, nchan0, nchan0_orig, nchan), file=log(0)) + print(" rebinned channel freqs (MHz): {}".format( + " ".join([str(x*1e-6) for x in chanfreqs])), file=log(1)) + print(" rebinned channel widths (MHz): {}".format( + " ".join([str(x*1e-6) for x in chanwidth])), file=log(1)) else: nchan = nchan0 self.chanfreqs[ddid] = chanfreqs0 self.chanwidth[ddid] = chanwidth0 self.freqchunks[ddid] = freqchunks = freqchunk_chan0 self.rebin_chan_maps[ddid] = None - print>>log(0)," DDID {}: {}/{} channels selected".format(ddid, nchan0, nchan0_orig) + print(" DDID {}: {}/{} channels selected".format(ddid, nchan0, nchan0_orig), file=log(0)) - print>>log(0)," found {} frequency chunks: {}".format(len(freqchunks), - " ".join([str(ch) for ch in freqchunks + [nchan]])) + print(" found {} frequency chunks: {}".format(len(freqchunks), + " ".join([str(ch) for ch in freqchunks + [nchan]])), file=log(0)) # now accumulate list of all frequencies, and also see if selected DDIDs have a uniform rebinning and chunking map all_freqs = set(self.chanfreqs[self._ddids[0]]) - self.do_freq_rebin = any([m is not None for m in self.rebin_chan_maps.values()]) + self.do_freq_rebin = any([m is not None for m in list(self.rebin_chan_maps.values())]) self._ddids_unequal = False ddid0_map = self.rebin_chan_maps[self._ddids[0]] for ddid in self._ddids[1:]: @@ -477,7 +477,7 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column all_freqs.update(self.chanfreqs[ddid]) if self._ddids_unequal: - print>>log(0,"red"),"Selected DDIDs have differing channel structure. Processing may be less efficient." + print("Selected DDIDs have differing channel structure. Processing may be less efficient.", file=log(0,"red")) # TODO: this assumes DDIDs are ordered in frequency. Exotic cases where this is not? 
@@ -495,8 +495,8 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column first_chan -= len(self.chanfreqs[self._ddids[0]]) self.ddid_first_chan = {ddid:first_chan[num] for num,ddid in enumerate(self._ddids)} - print>>log(1)," overall frequency space (MHz): {}".format(" ".join([str(f*1e-6) for f in self.all_freqs])) - print>>log(1)," DDIDs start at channels: {}".format(" ".join([str(ch) for ch in self.ddid_first_chan])) + print(" overall frequency space (MHz): {}".format(" ".join([str(f*1e-6) for f in self.all_freqs])), file=log(1)) + print(" DDIDs start at channels: {}".format(" ".join([str(ch) for ch in self.ddid_first_chan])), file=log(1)) # use TaQL to select subset @@ -506,23 +506,23 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column self.reopen() if self.taql: - print>> log, " applying TAQL query '%s' (%d/%d rows selected)" % (self.taql, - self.data.nrows(), self.ms.nrows()) + print(" applying TAQL query '%s' (%d/%d rows selected)" % (self.taql, + self.data.nrows(), self.ms.nrows()), file=log) if active_subset: subset = self.data.query(active_subset) self.active_row_numbers = np.array(subset.rownumbers(self.data)) self.inactive_rows = np.zeros(self.data.nrow(), True) self.inactive_rows[self.active_row_numbers] = False - print>> log, " applying TAQL query '%s' for solvable subset (%d/%d rows)" % (active_subset, - subset.nrows(), self.data.nrows()) + print(" applying TAQL query '%s' for solvable subset (%d/%d rows)" % (active_subset, + subset.nrows(), self.data.nrows()), file=log) else: self.active_row_numbers = self.inactive_rows = None self.min_baseline, self.max_baseline = min_baseline, max_baseline self.nrows = self.data.nrows() - self._datashape = {ddid: (self.nrows, len(freqs), self.ncorr) for ddid, freqs in self.chanfreqs.items()} + self._datashape = {ddid: (self.nrows, len(freqs), self.ncorr) for ddid, freqs in list(self.chanfreqs.items())} if not self.nrows: raise ValueError("MS selection returns no rows") @@ -531,13 +531,13 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column self.uniq_times = np.unique(self.time_col) self.ntime = len(self.uniq_times) - print>>log," %d antennas, %d rows, %d/%d DDIDs, %d timeslots, %d corrs %s" % (self.nants, + print(" %d antennas, %d rows, %d/%d DDIDs, %d timeslots, %d corrs %s" % (self.nants, self.nrows, len(self._ddids), self._num_total_ddids, self.ntime, - self.nmscorrs, "(using diag only)" if self._corr_4to2 else "") - print>>log," DDID central frequencies are at {} GHz".format( - " ".join(["%.2f"%(self.chanfreqs[d][len(self.chanfreqs[d])//2]*1e-9) for d in self._ddids])) + self.nmscorrs, "(using diag only)" if self._corr_4to2 else ""), file=log) + print(" DDID central frequencies are at {} GHz".format( + " ".join(["%.2f"%(self.chanfreqs[d][len(self.chanfreqs[d])//2]*1e-9) for d in self._ddids])), file=log) if self.do_freq_rebin and (output_column or output_model_column): - print>>log(0, "red"),"WARNING: output columns will be upsampled from frequency-binned data!" 
+ print("WARNING: output columns will be upsampled from frequency-binned data!", file=log(0, "red")) self.nddid = len(self._ddids) self.data_column = data_column @@ -548,16 +548,16 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column reinit_columns = [col for col in [output_column, output_model_column] if col and col in self.ms.colnames()] if reinit_columns: - print>>log(0),"reinitializing output column(s) {}".format(" ".join(reinit_columns)) + print("reinitializing output column(s) {}".format(" ".join(reinit_columns)), file=log(0)) self.ms.removecols(reinit_columns) for col in reinit_columns: self._add_column(col) if output_weight_column is not None: - print>>log(0),"reinitializing output weight column {}".format(output_weight_column) + print("reinitializing output weight column {}".format(output_weight_column), file=log(0)) try: self.ms.removecols(output_weight_column) #Just remove column will be added later except: - print>>log(0),"No output weight column {}, will just proceed".format(output_weight_column) + print("No output weight column {}, will just proceed".format(output_weight_column), file=log(0)) self._add_column(output_weight_column, like_type='float') self.reopen() @@ -580,12 +580,12 @@ def __init__(self, ms_name, data_column, output_column=None, output_model_column self.derotate_output = derotate_output if derotate_output is not None else self.rotate_model self.pa_rotate_montblanc = pa_rotate_montblanc if pa_rotate_montblanc is not None else pa_rotate_model - print>>log(0),"Input model feed rotation {}abled, PA rotation {}abled".format( - "en" if feed_rotate_model else "dis", "en" if pa_rotate_model else "dis") + print("Input model feed rotation {}abled, PA rotation {}abled".format( + "en" if feed_rotate_model else "dis", "en" if pa_rotate_model else "dis"), file=log(0)) if feed_rotate_model: - print>>log(1)," feed angles (deg) are {}".format(", ".join(["{:.1f} {:.1f}".format(*fa) for fa in feed_angles*180/math.pi])) - print>>log(0),"Output visibilities derotation {}abled".format( - "en" if self.derotate_output else "dis") + print(" feed angles (deg) are {}".format(", ".join(["{:.1f} {:.1f}".format(*fa) for fa in feed_angles*180/math.pi])), file=log(1)) + print("Output visibilities derotation {}abled".format( + "en" if self.derotate_output else "dis"), file=log(0)) if self.rotate_model or self.derotate_output: self.parallactic_machine = parallactic_machine(antnames, @@ -610,7 +610,7 @@ def init_models(self, models, weights, fill_offdiag_weights=False, mb_opts={}, u elif len(weights) == 1: weights = weights*len(models) elif len(weights) != len(models): - raise ValueError,"need as many sets of weights as there are models" + raise ValueError("need as many sets of weights as there are models") self.fill_offdiag_weights = fill_offdiag_weights self.use_montblanc = False # will be set to true if Montblanc is invoked @@ -648,36 +648,36 @@ def init_models(self, models, weights, fill_offdiag_weights=False, mb_opts={}, u if montblanc is None: montblanc, exc = data_handler.import_montblanc() if montblanc is None: - print>> log, ModColor.Str("Error importing Montblanc: ") + print(ModColor.Str("Error importing Montblanc: "), file=log) for line in traceback.format_exception(*exc): - print>> log, " " + ModColor.Str(line) - print>> log, ModColor.Str("Without Montblanc, LSM functionality is not available.") + print(" " + ModColor.Str(line), file=log) + print(ModColor.Str("Without Montblanc, LSM functionality is not available."), file=log) raise RuntimeError("Error 
importing Montblanc")
                 self.use_montblanc = True
-                import TiggerSourceProvider
+                from . import TiggerSourceProvider
                 component = TiggerSourceProvider.TiggerSourceProvider(component, self.phadir,
                                                                       dde_tag=use_ddes and tag)
                 for key in component._cluster_keys:
                     dirname = idirtag if key == 'die' else key
                     dirmodels.setdefault(dirname, []).append((component, key, subtract))
             else:
-                raise ValueError,"model component {} is neither a valid LSM nor an MS column".format(component)
+                raise ValueError("model component {} is neither a valid LSM nor an MS column".format(component))
             # else it is a visibility column component
             else:
                 dirmodels.setdefault(idirtag, []).append((component, None, subtract))
-            self.model_directions.update(dirmodels.iterkeys())
+            self.model_directions.update(iter(dirmodels.keys()))
 
         # Now, each model is a dict of dirmodels, keyed by direction name (unnamed directions are _dir0, _dir1, etc.)
         # Get all possible direction names
         self.model_directions = sorted(self.model_directions)
 
         # print out the results
-        print>>log(0),ModColor.Str("Using {} model(s) for {} directions(s){}".format(
+        print(ModColor.Str("Using {} model(s) for {} direction(s){}".format(
             len(self.models),
             len(self.model_directions),
             " (DDEs explicitly disabled)" if not use_ddes else""),
-            col="green")
+            col="green"), file=log(0))
 
         for imod, (dirmodels, weight_col) in enumerate(self.models):
-            print>>log(0)," model {} (weight {}):".format(imod, weight_col)
+            print(" model {} (weight {}):".format(imod, weight_col), file=log(0))
             for idir, dirname in enumerate(self.model_directions):
                 if dirname in dirmodels:
                     comps = ""
@@ -690,9 +690,9 @@ def init_models(self, models, weights, fill_offdiag_weights=False, mb_opts={}, u
                             comps += "{}{}".format(sign, comp)
                         else:
                             comps += "{}{}({})".format(sign,tag, comp)
-                    print>>log(0)," direction {}: {}".format(idir, comps)
+                    print(" direction {}: {}".format(idir, comps), file=log(0))
                 else:
-                    print>>log(0)," direction {}: empty".format(idir)
+                    print(" direction {}: empty".format(idir), file=log(0))
 
         self.use_ddes = len(self.model_directions) > 1
 
@@ -776,7 +776,7 @@ def fetchslice(self, column, startrow=0, nrows=-1, subset=None):
             Result of getcolslice()
         """
         subset = subset or self.data
-        print>> log(0), "reading {}".format(column)
+        print("reading {}".format(column), file=log(0))
         if self._ms_blc == None:
             return subset.getcol(column, startrow, nrows)
         return subset.getcolslice(column, self._ms_blc, self._ms_trc, self._ms_incr, startrow, nrows)
@@ -903,21 +903,21 @@ def define_chunk(self, chunk_time, rebin_time, fdim=1, chunk_by=None, chunk_by_j
         self.rownumbers = self.data.rownumbers()
         self.time_col = time_col = self.fetch("TIME")
         self.ddid_col = ddid_col = ddid_col0 = self.fetch("DATA_DESC_ID").astype(np.int64)
-        print>> log, " read indexing columns ({} total rows)".format(len(self.time_col))
+        print(" read indexing columns ({} total rows)".format(len(self.time_col)), file=log)
         self.do_time_rebin = False
 
         self.times, self.uniq_times,_ = data_handler.uniquify(time_col)
-        print>> log, " built timeslot index ({} unique timestamps)".format(len(self.uniq_times))
+        print(" built timeslot index ({} unique timestamps)".format(len(self.uniq_times)), file=log)
 
         chunk_timeslots, chunk_seconds = _parse_timespec(chunk_time, 1<<31, 1e+99)
-        print>>log," max chunk size is {} timeslots and/or {} seconds".format(
-            '--' if chunk_timeslots == 1<<31 else chunk_timeslots,
-            '--' if chunk_seconds == 1e+99 else chunk_seconds)
+        print(" max chunk size is {} timeslots and/or {} seconds".format(
+            '--' if chunk_timeslots == 1<<31 else chunk_timeslots,
+            '--' if chunk_seconds == 1e+99
else chunk_seconds), file=log) rebin_timeslots, rebin_seconds = _parse_timespec(rebin_time, 1, None) if rebin_seconds is not None: - print>>log, " computing time rebinning into {} seconds".format(rebin_seconds) + print(" computing time rebinning into {} seconds".format(rebin_seconds), file=log) elif rebin_timeslots > 1: - print>>log," computing time rebinning by {} timeslots".format(rebin_timeslots) + print(" computing time rebinning by {} timeslots".format(rebin_timeslots), file=log) import cubical.kernels rebinning = cubical.kernels.import_kernel("rebinning") @@ -970,9 +970,9 @@ def define_chunk(self, chunk_time, rebin_time, fdim=1, chunk_by=None, chunk_by_j nrow_out += 1 self.rebin_row_map[row0] = row if a1>log," found {} time chunks: {} {}".format(len(timechunk_row0), + print(" found {} time chunks: {} {}".format(len(timechunk_row0), " ".join(["{}:{}:{}".format(i, r, self.times[r]) for i, r in enumerate(timechunk_row0)]), - str(self.times[-1]+1)) + str(self.times[-1]+1)), file=log) # at the end of this, we have a list of timechunk_row0: i.e. a list of starting rows for # each time chunk (which may composed of multiple DDIDs), plus rebin_row_map: a vector giving @@ -990,9 +990,9 @@ def define_chunk(self, chunk_time, rebin_time, fdim=1, chunk_by=None, chunk_by_j self.times, self.uniq_times, _ = data_handler.uniquify(self.time_col) self.do_time_rebin = True - print>> log, " will rebin into {} rows ({} rebinned timeslots)".format(nrow_out, len(self.uniq_times)) + print(" will rebin into {} rows ({} rebinned timeslots)".format(nrow_out, len(self.uniq_times)), file=log) if self.output_column or self.output_model_column: - print>> log(0, "red"), "WARNING: output columns will be upsampled from time-binned data!" + print("WARNING: output columns will be upsampled from time-binned data!", file=log(0, "red")) else: self.rebin_row_map = np.arange(nrows0, dtype=int) # swap conjugate baselines @@ -1055,7 +1055,7 @@ def define_chunk(self, chunk_time, rebin_time, fdim=1, chunk_by=None, chunk_by_j chunklist.append(RowChunk(ddid, tchunk, timeslice, rows, rows0)) self.nddid_actual = len(self._actual_ddids) - print>>log," generated {} row chunks based on time and DDID".format(len(chunklist)) + print(" generated {} row chunks based on time and DDID".format(len(chunklist)), file=log) # re-sort these row chunks into naturally increasing order (by first row of each chunk) def _compare_chunks(a, b): @@ -1063,7 +1063,7 @@ def _compare_chunks(a, b): chunklist.sort(cmp=_compare_chunks) if log.verbosity() > 2: - print>>log(3)," row chunks: {}".format(", ".join(["{} {}:{}".format(ch.tchunk, min(ch.rows0), max(ch.rows0)+1) for ch in chunklist])) + print(" row chunks: {}".format(", ".join(["{} {}:{}".format(ch.tchunk, min(ch.rows0), max(ch.rows0)+1) for ch in chunklist])), file=log(3)) # now, break the row chunks into tiles. Tiles are an "atom" of I/O. First, we try to define each tile as a # sequence of overlapping row chunks (i.e. chunks such that the first row of a subsequent chunk comes before @@ -1083,7 +1083,7 @@ def _compare_chunks(a, b): else: tile_list[-1].append(chunk) - print>> log, " row chunks yield {} potential tiles".format(len(tile_list)) + print(" row chunks yield {} potential tiles".format(len(tile_list)), file=log) # now, for effective I/O and parallelisation, we need to have a minimum amount of chunks per tile. 
# Coarsen our tiles to achieve this @@ -1104,8 +1104,8 @@ def _compare_chunks(a, b): max_chunks = max([tile.total_tf_chunks() for tile in tile_list]) - print>> log, " coarsening this to {} tiles (max {} chunks per tile, based on {}/{} requested)".format( - len(tile_list), max_chunks, chunks_per_tile, max_chunks_per_tile) + print(" coarsening this to {} tiles (max {} chunks per tile, based on {}/{} requested)".format( + len(tile_list), max_chunks, chunks_per_tile, max_chunks_per_tile), file=log) return max_chunks, tile_list @@ -1120,26 +1120,26 @@ def define_flags(self, tile_list, flagopts): # Do we have a proper bitflag column? bitflags = None if "BITFLAG" in self.ms.colnames(): - print>> log(1), "checking MS BITFLAG column" + print("checking MS BITFLAG column", file=log(1)) # asked to re-initialize: blow it away if reinit_bitflags: - print>> log(0, "red"), "will re-initialize BITFLAG column, since --flags-reinit-bitflags is set." - print>> log(0, "red"), "WARNING: current state of FLAG column will be used to init bitflags!" + print("will re-initialize BITFLAG column, since --flags-reinit-bitflags is set.", file=log(0, "red")) + print("WARNING: current state of FLAG column will be used to init bitflags!", file=log(0, "red")) # check for consistency: BITFLAG_ROW must be present too elif "BITFLAG_ROW" not in self.ms.colnames(): - print>> log(0, "red"), "WARNING: the BITFLAG_ROW column does not appear to be properly initialized. " \ - "This is perhaps due to a previous CubiCal run being interrupted while it was filling the column. " + print("WARNING: the BITFLAG_ROW column does not appear to be properly initialized. " \ + "This is perhaps due to a previous CubiCal run being interrupted while it was filling the column. ", file=log(0, "red")) # auto-fill keyword must be cleared (otherwise a filling loop was interrupted) elif "AUTOINIT_IN_PROGRESS" in self.ms.colkeywordnames("BITFLAG"): - print>> log(0, "red"), "WARNING: the BITFLAG column does not appear to be properly initialized. " \ - "This is perhaps due to a previous CubiCal run being interrupted while it was filling the column. " + print("WARNING: the BITFLAG column does not appear to be properly initialized. " \ + "This is perhaps due to a previous CubiCal run being interrupted while it was filling the column. ", file=log(0, "red")) # all cells must be defined - elif not all([self.data.iscelldefined("BITFLAG", i) for i in xrange(self.data.nrows())]): - print>> log(0, "red"), "WARNING: the BITFLAG column appears to have missing cells. " \ - "This is perhaps due to a previous CubiCal run being interrupted while it was filling the column. " + elif not all([self.data.iscelldefined("BITFLAG", i) for i in range(self.data.nrows())]): + print("WARNING: the BITFLAG column appears to have missing cells. " \ + "This is perhaps due to a previous CubiCal run being interrupted while it was filling the column. ", file=log(0, "red")) # OK, it's valid as best as we can tell else: - print>> log(0), "the MS appears to have a properly formed BITFLAG column" + print("the MS appears to have a properly formed BITFLAG column", file=log(0)) bitflags = flagging.Flagsets(self.ms) # If no bitflags at this stage (though the column exists), then blow it away if auto_init is enabled. 
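[review note] One Python 2 idiom in this file escapes 2to3 entirely: the hunk at @@ -1055 above retains chunklist.sort(cmp=_compare_chunks), and list.sort() no longer accepts a cmp argument in Python 3, so that line will raise TypeError when reached. The standard fix is a key function, via functools.cmp_to_key if the comparator must be kept. A runnable sketch (the real _compare_chunks body is not shown in the hunk, so the comparator here is a hypothetical stand-in ordering chunks by first row):

    import functools

    def _compare_chunks(a, b):
        # hypothetical stand-in: order chunks by their smallest row number
        return (min(a) > min(b)) - (min(a) < min(b))

    chunklist = [[30, 31], [0, 1], [10, 11]]
    # Python 3 replacement for chunklist.sort(cmp=_compare_chunks):
    chunklist.sort(key=functools.cmp_to_key(_compare_chunks))
    assert chunklist == [[0, 1], [10, 11], [30, 31]]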
@@ -1151,7 +1151,7 @@ def define_flags(self, tile_list, flagopts): self.ms.removecols("BITFLAG") if "BITFLAG_ROW" in self.ms.colnames(): self.ms.removecols("BITFLAG_ROW") - print>> log(0, "red"), "removing current BITFLAG/BITFLAG_ROW columns" + print("removing current BITFLAG/BITFLAG_ROW columns", file=log(0, "red")) self.reopen() self._apply_flags = self._apply_bitflags = self._save_bitflag = self._auto_fill_bitflag = None @@ -1170,10 +1170,10 @@ def define_flags(self, tile_list, flagopts): if type(auto_init) is not str: raise ValueError("Illegal --flags-auto-init setting -- a flagset name such as 'legacy' must be specified") if auto_init in bitflags.names(): - print>>log(0), " bitflag '{}' already exists, will not auto-fill".format(auto_init) + print(" bitflag '{}' already exists, will not auto-fill".format(auto_init), file=log(0)) else: - print>>log(0, "blue"), " auto-filling bitflag '{}' from FLAG/FLAG_ROW column. Please do not interrupt this process!".format(auto_init) - print>>log(0), " note that all other bitflags will be cleared by this" + print(" auto-filling bitflag '{}' from FLAG/FLAG_ROW column. Please do not interrupt this process!".format(auto_init), file=log(0, "blue")) + print(" note that all other bitflags will be cleared by this", file=log(0)) self.ms.putcolkeyword("BITFLAG", "AUTOINIT_IN_PROGRESS", True) self._auto_fill_bitflag = bitflags.flagmask(auto_init, create=True) @@ -1181,7 +1181,7 @@ def define_flags(self, tile_list, flagopts): tile.fill_bitflags(self._auto_fill_bitflag) self.ms.removecolkeyword("BITFLAG", "AUTOINIT_IN_PROGRESS") - print>>log(0, "blue"), " auto-fill complete" + print(" auto-fill complete", file=log(0, "blue")) # init flagcounts dict @@ -1199,46 +1199,46 @@ def define_flags(self, tile_list, flagopts): elif type(apply_flags) is not str: raise ValueError("Illegal --flags-apply setting -- string or bitmask values expected") else: - print>>log," BITFLAG column defines the following flagsets: {}".format( - " ".join(['{}:{}'.format(name, bitflags.bits[name]) for name in bitflags.names()])) + print(" BITFLAG column defines the following flagsets: {}".format( + " ".join(['{}:{}'.format(name, bitflags.bits[name]) for name in bitflags.names()])), file=log) if apply_flags == "FLAG": self._apply_flags = True elif apply_flags[0] == '-': flagset = apply_flags[1:] - print>> log(0), " will exclude flagset {}".format(flagset) + print(" will exclude flagset {}".format(flagset), file=log(0)) if flagset not in bitflags.bits: - print>>log(0,"red")," flagset '{}' not found -- ignoring".format(flagset) - self._apply_bitflags = sum([bitmask for fset, bitmask in bitflags.bits.iteritems() if fset != flagset]) + print(" flagset '{}' not found -- ignoring".format(flagset), file=log(0,"red")) + self._apply_bitflags = sum([bitmask for fset, bitmask in bitflags.bits.items() if fset != flagset]) else: - print>> log(0), " will apply flagset(s) {}".format(apply_flags) + print(" will apply flagset(s) {}".format(apply_flags), file=log(0)) apply_flags = apply_flags.split(",") for flagset in apply_flags: if flagset not in bitflags.bits: - print>>log(0,"red")," flagset '{}' not found -- ignoring".format(flagset) + print(" flagset '{}' not found -- ignoring".format(flagset), file=log(0,"red")) else: self._apply_bitflags |= bitflags.bits[flagset] if self._apply_flags: - print>> log, " using flags from FLAG/FLAG_ROW columns" + print(" using flags from FLAG/FLAG_ROW columns", file=log) if self._apply_bitflags: - print>> log(0, "blue"), " applying BITFLAG mask {} to input 
data".format(self._apply_bitflags) + print(" applying BITFLAG mask {} to input data".format(self._apply_bitflags), file=log(0, "blue")) elif not self._apply_flags: - print>> log(0, "red"), " no input flags will be applied!" + print(" no input flags will be applied!", file=log(0, "red")) if save_bitflag: self._save_bitflag = bitflags.flagmask(save_bitflag, create=True) if self._save_flags: if self._save_flags_apply: - print>> log(0,"blue"), " will save output flags into BITFLAG '{}' ({}), and all flags (including input) FLAG/FLAG_ROW".format( - save_bitflag, self._save_bitflag) + print(" will save output flags into BITFLAG '{}' ({}), and all flags (including input) FLAG/FLAG_ROW".format( + save_bitflag, self._save_bitflag), file=log(0,"blue")) else: - print>> log(0,"blue"), " will save output flags into BITFLAG '{}' ({}), and into FLAG/FLAG_ROW".format(save_bitflag, self._save_bitflag) + print(" will save output flags into BITFLAG '{}' ({}), and into FLAG/FLAG_ROW".format(save_bitflag, self._save_bitflag), file=log(0,"blue")) else: - print>> log(0,"red"), " will save output flags into BITFLAG '{}' ({}), but not into FLAG/FLAG_ROW".format(save_bitflag, self._save_bitflag) + print(" will save output flags into BITFLAG '{}' ({}), but not into FLAG/FLAG_ROW".format(save_bitflag, self._save_bitflag), file=log(0,"red")) else: if self._save_flags: if self._save_flags_apply: - print>> log(0, "blue"), " will save all flags (including input) into FLAG/FLAG_ROW" + print(" will save all flags (including input) into FLAG/FLAG_ROW", file=log(0, "blue")) else: - print>> log(0, "blue"), " will save output flags into FLAG/FLAG_ROW" + print(" will save output flags into FLAG/FLAG_ROW", file=log(0, "blue")) for flagset in bitflags.names(): self.flagcounts[flagset] = 0 @@ -1252,11 +1252,11 @@ def define_flags(self, tile_list, flagopts): self._apply_flags = bool(apply_flags) self._apply_bitflags = 0 if self._apply_flags: - print>> log, ModColor.Str(" no BITFLAG column in this MS. Using flags from FLAG/FLAG_ROW columns") + print(ModColor.Str(" no BITFLAG column in this MS. 
Using flags from FLAG/FLAG_ROW columns"), file=log)
             else:
-                print>> log, ModColor.Str(" no flags will be read, since --flags-apply was not set")
+                print(ModColor.Str(" no flags will be read, since --flags-apply was not set"), file=log)
             if self._save_flags:
-                print>> log(0, "blue"), " will save output flags into into FLAG/FLAG_ROW"
+                print(" will save output flags into FLAG/FLAG_ROW", file=log(0, "blue"))
                 self._save_flags_apply = False  # no point in saving input flags, as nothing would change
 
         self.bitflags = {}
@@ -1272,7 +1272,7 @@ def update_flag_counts(self, counts):
     def get_flag_counts(self):
         total = float(self.flagcounts['TOTAL'])
         result = []
-        for name, count in self.flagcounts.iteritems():
+        for name, count in self.flagcounts.items():
             if name != 'TOTAL':
                 result.append("{}:{:.2%}".format(name, count/total))
         return result
@@ -1316,15 +1316,15 @@ def add_to_gain_dict(self, gains, bounds, t_int=1, f_int=1):
 
         timestamps = self.chunk_timestamps[timechunk]
 
-        freqs = range(first_f,last_f)
-        freq_indices = [[] for i in xrange(n_fre)]
+        freqs = list(range(first_f,last_f))
+        freq_indices = [[] for i in range(n_fre)]
 
         for f, freq in enumerate(freqs):
             freq_indices[f//f_int].append(freq)
 
-        for d in xrange(n_dir):
-            for t in xrange(n_tim):
-                for f in xrange(n_fre):
+        for d in range(n_dir):
+            for t in range(n_tim):
+                for f in range(n_fre):
                     comp_idx = (d,tuple(timestamps),tuple(freq_indices[f]))
                     self.gain_dict[comp_idx] = gains[d,t,f,:]
 
@@ -1340,7 +1340,7 @@ def write_gain_dict(self, output_name=None):
         if output_name is None:
             output_name = self.ms_name + "/gains.p"
 
-        cPickle.dump(self.gain_dict, open(output_name, "wb"), protocol=2)
+        pickle.dump(self.gain_dict, open(output_name, "wb"), protocol=2)
 
     def _add_column (self, col_name, like_col="DATA", like_type=None):
         """
@@ -1361,7 +1361,7 @@ def _add_column (self, col_name, like_col="DATA", like_type=None):
 
         if col_name not in self.ms.colnames():
             # new column needs to be inserted -- get column description from column 'like_col'
-            print>> log, " inserting new column %s" % (col_name)
+            print(" inserting new column %s" % (col_name), file=log)
             desc = self.ms.getcoldesc(like_col)
             desc['name'] = col_name
             desc['comment'] = desc['comment'].replace(" ", "_")  # got this from Cyril, not sure why
@@ -1426,36 +1426,36 @@ def save_flags(self, flags):
                 Flag values to be written to column.
         """
-        print>>log,"Writing out new flags"
+        print("Writing out new flags", file=log)
         try:
             bflag_col = self.fetch("BITFLAG")
         except Exception:
             if not self._auto_fill_bitflag:
-                print>> log, ModColor.Str(traceback.format_exc().strip())
-                print>> log, ModColor.Str("Error reading BITFLAG column, and --flags-auto-init is not set.")
+                print(ModColor.Str(traceback.format_exc().strip()), file=log)
+                print(ModColor.Str("Error reading BITFLAG column, and --flags-auto-init is not set."), file=log)
                 raise
-            print>> log(0,"red"), "Error reading BITFLAG column: not fatal, since we'll auto-fill it from FLAG"
-            print>> log(0,"red"), "However, it really should have been filled above, so this may be a bug."
-            print>> log(0,"red"), "Please save your logfile and contact the developers."
+ print("Error reading BITFLAG column: not fatal, since we'll auto-fill it from FLAG", file=log(0,"red")) + print("However, it really should have been filled above, so this may be a bug.", file=log(0,"red")) + print("Please save your logfile and contact the developers.", file=log(0,"red")) for line in traceback.format_exc().strip().split("\n"): - print>> log, " " + line + print(" " + line, file=log) flag_col = self.fetch("FLAG") bflag_col = np.zeros(flag_col.shape, np.int32) bflag_col[flag_col] = self._auto_fill_bitflag # raise specified bitflag - print>> log, " updating BITFLAG column flagbit %d"%self._save_bitflag + print(" updating BITFLAG column flagbit %d"%self._save_bitflag, file=log) #bflag_col[:, self._channel_slice, :] &= ~self._save_bitflag # clear the flagbit first bflag_col[:, self._channel_slice, :][flags] |= self._save_bitflag self.data.putcol("BITFLAG", bflag_col) - print>>log, " updating BITFLAG_ROW column" + print(" updating BITFLAG_ROW column", file=log) self.data.putcol("BITFLAG_ROW", np.bitwise_and.reduce(bflag_col, axis=(-1,-2))) flag_col = bflag_col != 0 - print>> log, " updating FLAG column ({:.2%} visibilities flagged)".format( - flag_col.sum()/float(flag_col.size)) + print(" updating FLAG column ({:.2%} visibilities flagged)".format( + flag_col.sum()/float(flag_col.size)), file=log) self.data.putcol("FLAG", flag_col) flag_row = flag_col.all(axis=(-1,-2)) - print>> log, " updating FLAG_ROW column ({:.2%} rows flagged)".format( - flag_row.sum()/float(flag_row.size)) + print(" updating FLAG_ROW column ({:.2%} rows flagged)".format( + flag_row.sum()/float(flag_row.size)), file=log) self.data.putcol("FLAG_ROW", flag_row) self.data.flush() diff --git a/cubical/data_handler/ms_tile.py b/cubical/data_handler/ms_tile.py index 20cc89a8..6c27c6c2 100644 --- a/cubical/data_handler/ms_tile.py +++ b/cubical/data_handler/ms_tile.py @@ -103,7 +103,7 @@ def upsample(self, data): if not self.tile.dh.do_freq_rebin and not self.tile.dh.do_time_rebin: return data shape = len(self.rebin_row_map), len(self.rebin_chan_map), data.shape[2] - print>>log(1),"upsampling to {} rows and {} channels".format(shape[0], shape[1]) + print("upsampling to {} rows and {} channels".format(shape[0], shape[1]), file=log(1)) return data[self.rebin_row_map[:, np.newaxis], self.rebin_chan_map[np.newaxis, :], None].reshape(shape) @@ -123,7 +123,7 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo """ - import MBTiggerSim + from . 
import MBTiggerSim from montblanc.impl.rime.tensorflow.sources import CachedSourceProvider, FitsBeamSourceProvider # setup montblanc machinery once per subset (may be called multiple times for different models) @@ -153,8 +153,8 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo np.int32) # make full list of row indices in Montblanc-compliant order (ddid-time-ant1-ant2) - full_index = [(p, q, t, d) for d in xrange(len(uniq_ddids)) for t in uniq_times - for p in xrange(self.nants) for q in xrange(self.nants) + full_index = [(p, q, t, d) for d in range(len(uniq_ddids)) for t in uniq_times + for p in range(self.nants) for q in range(self.nants) if p < q] self._expected_nrows = len(full_index) @@ -162,19 +162,19 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo # and corresponding full set of indices full_row_set = set(full_index) - print>> log(1), " {} rows ({} expected for {} timeslots, {} baselines and {} DDIDs)".format( - self.nrows, self._expected_nrows, ntime, n_bl, len(uniq_ddids)) + print(" {} rows ({} expected for {} timeslots, {} baselines and {} DDIDs)".format( + self.nrows, self._expected_nrows, ntime, n_bl, len(uniq_ddids)), file=log(1)) # make mapping from existing indices -> row numbers, omitting autocorrelations current_row_index = {(p, q, t, d): row for row, (p, q, t, d) in enumerate(zip(self.antea, self.anteb, self.times, ddid_index)) if p != q} # do we need to add fake rows for missing data? - missing = full_row_set.difference(current_row_index.iterkeys()) + missing = full_row_set.difference(iter(current_row_index.keys())) nmiss = len(missing) if nmiss: - print>> log(1), " {} rows will be padded in for Montblanc".format(nmiss) + print(" {} rows will be padded in for Montblanc".format(nmiss), file=log(1)) # pad up columns uvwco = np.concatenate((uvwco, [[0, 0, 0]] * nmiss)) antea = np.concatenate((self.antea, @@ -211,7 +211,7 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo else: self._mb_arbeam_src = None - print>> log(0), " computing visibilities for {}".format(model_source) + print(" computing visibilities for {}".format(model_source), file=log(0)) # setup Montblanc computation for this LSM tigger_source = model_source cached_src = CachedSourceProvider(tigger_source, clear_start=True, clear_stop=True) @@ -224,11 +224,11 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo model_shape = (ndirs, 1, self._expected_nrows, self.nfreq, self.tile.dh.ncorr) use_double = self.tile.dh.mb_opts['dtype'] == 'double' full_model = np.zeros(model_shape, np.complex128 if use_double else self.tile.dh.ctype) - print>>log(0),"montblanc dtype is {} ('{}')".format(full_model.dtype, self.tile.dh.mb_opts['dtype']) + print("montblanc dtype is {} ('{}')".format(full_model.dtype, self.tile.dh.mb_opts['dtype']), file=log(0)) column_snk = MBTiggerSim.ColumnSinkProvider(self.tile.dh, self._freqs.shape, full_model, self._mb_sorted_ind) snks = [column_snk] - for direction in xrange(ndirs): + for direction in range(ndirs): tigger_source.set_direction(direction) tigger_source.set_frequency(self._freqs) column_snk.set_direction(direction) @@ -243,9 +243,9 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo for i, clus in enumerate(tigger_source._cluster_keys)} model = loaded_models[model_source][cluster] - print>> log(1), " using {}{} for model {} direction {}".format(model_source, + print(" using {}{} for model {} direction 
{}".format(model_source, "" if not cluster else ("()" if cluster == 'die' else "({})".format(cluster)), - imod, idir) + imod, idir), file=log(1)) # release memory asap del column_snk, snks @@ -292,7 +292,7 @@ def _column_to_cube(self, column, chunk_tdim, chunk_fdim, rows, freqs, dtype, ze possible_dims = ["dirs", "mods", "rows", "freqs", "cors"] - dims = {possible_dims[-i]: column.shape[-i] for i in xrange(1, col_ndim + 1)} + dims = {possible_dims[-i]: column.shape[-i] for i in range(1, col_ndim + 1)} dims.setdefault("mods", 1) dims.setdefault("dirs", 1) @@ -324,10 +324,10 @@ def _column_to_cube(self, column, chunk_tdim, chunk_fdim, rows, freqs, dtype, ze corr_slice = slice(None) if self.ncorr == 4 else slice(None, None, 3) col_selections = [tuple([dirs, mods, rows, freqs, slice(None)][-col_ndim:]) - for dirs in xrange(dims["dirs"]) for mods in xrange(dims["mods"])] + for dirs in range(dims["dirs"]) for mods in range(dims["mods"])] cub_selections = [[dirs, mods, tchunk, slice(None), achunk, bchunk, corr_slice][-(reqdims - 1):] - for dirs in xrange(dims["dirs"]) for mods in xrange(dims["mods"])] + for dirs in range(dims["dirs"]) for mods in range(dims["mods"])] # The following takes the arbitrarily ordered data from the MS and places it into a N-D # data structure (correlation matrix). @@ -361,7 +361,7 @@ def _column_to_cube(self, column, chunk_tdim, chunk_fdim, rows, freqs, dtype, ze # This zeros the diagonal elements in the "baseline" plane. This is purely a precaution - # we do not want autocorrelations on the diagonal. - out_arr[..., range(self.nants), range(self.nants), :] = zeroval + out_arr[..., list(range(self.nants)), list(range(self.nants)), :] = zeroval return out_arr0 @@ -539,7 +539,7 @@ def get_chunk_indices(self, key): def get_chunk_keys(self): """ Returns all chunk keys. 
""" - return self._chunk_dict.iterkeys() + return iter(self._chunk_dict.keys()) def get_chunk_tfs(self, key): """ @@ -566,11 +566,11 @@ def get_chunk_tfs(self, key): def fill_bitflags(self, flagbit): for subset in self._subsets: if subset.label is None: - print>> log(1), " {}: filling bitflags, MS rows {}~{}".format(self.label, self.first_row0, self.last_row0) + print(" {}: filling bitflags, MS rows {}~{}".format(self.label, self.first_row0, self.last_row0), file=log(1)) else: - print>> log(1), " {}: filling bitflags, MS rows {}~{}, {} ({} rows)".format(self.label, self.first_row0, + print(" {}: filling bitflags, MS rows {}~{}, {} ({} rows)".format(self.label, self.first_row0, self.last_row0, subset.label, - len(subset.rows0)) + len(subset.rows0)), file=log(1)) table_subset = self.dh.data.selectrows(subset.rows0) flagcol = table_subset.getcol("FLAG") flagrow = table_subset.getcol("FLAG_ROW") @@ -618,12 +618,12 @@ def load(self, load_model=True): for subset in self._subsets: if subset.label is None: - print>> log(0, "blue"), "{}: reading MS rows {}~{}".format(self.label, self.first_row0, self.last_row0) + print("{}: reading MS rows {}~{}".format(self.label, self.first_row0, self.last_row0), file=log(0, "blue")) data = data0 else: - print>> log(0, "blue"), "{}: reading MS rows {}~{}, {} ({} rows)".format(self.label, self.first_row0, + print("{}: reading MS rows {}~{}, {} ({} rows)".format(self.label, self.first_row0, self.last_row0, subset.label, - len(subset.rows0)) + len(subset.rows0)), file=log(0, "blue")) data = shared_dict.create(subset.datadict) table_subset = self.dh.data.selectrows(subset.rows0) @@ -632,10 +632,10 @@ def load(self, load_model=True): original_row_numbers = table_subset.rownumbers(self.data) obvis0 = self.dh.fetchslice(self.dh.data_column, subset=table_subset).astype(self.dh.ctype) - print>> log(2), " read " + self.dh.data_column + print(" read " + self.dh.data_column, file=log(2)) uvw0 = table_subset.getcol("UVW") - print>> log(2), " read UVW coordinates" + print(" read UVW coordinates", file=log(2)) # read weight columns, if a model is to be read @@ -650,7 +650,7 @@ def load(self, load_model=True): weight_col = weight_col[:-1] wcol = wcol_cache.get(weight_col) if wcol is None: - print>> log(0), "model {} weights {}: reading from {}{}".format(imod, iwcol, weight_col, mean_corr) + print("model {} weights {}: reading from {}{}".format(imod, iwcol, weight_col, mean_corr), file=log(0)) wcol = table_subset.getcol(weight_col) # support two shapes of wcol: either same as data (a-la WEIGHT_SPECTRUM), or missing # a frequency axis (a-la WEIGHT) @@ -659,14 +659,14 @@ def load(self, load_model=True): if wcol.shape != obvis0.shape: raise RuntimeError("column {} does not match shape of visibility column".format(weight_col)) elif tuple(wcol.shape) == (obvis0.shape[0], self.dh.nmscorrs): - print>> log(0), " this weight column does not have a frequency axis: broadcasting" + print(" this weight column does not have a frequency axis: broadcasting", file=log(0)) wcol_cache[weight_col] = np.empty_like(obvis0, self.dh.wtype) wcol_cache[weight_col][:] = wcol[:, np.newaxis, self.dh._corr_slice] wcol = wcol_cache[weight_col] else: raise RuntimeError("column {} has an invalid shape {} (expected {})".format(weight_col, wcol.shape, obvis0.shape)) else: - print>> log(0), "model {} weights {}: reusing {}{}".format(imod, iwcol, weight_col, mean_corr) + print("model {} weights {}: reusing {}{}".format(imod, iwcol, weight_col, mean_corr), file=log(0)) # take mean weights if specified, else fill 
off-diagonals
                     if mean_corr:
                         wcol = wcol.mean(-1)[..., np.newaxis]
@@ -701,7 +701,7 @@ def load(self, load_model=True):
                 flagcol = self.dh.fetchslice("FLAG", subset=table_subset)
                 flagrow = table_subset.getcol("FLAG_ROW")
                 flagcol[flagrow, :, :] = True
-                print>> log(2), " read FLAG/FLAG_ROW"
+                print(" read FLAG/FLAG_ROW", file=log(2))
                 # compute stats
                 self._flagcol_sum = flagcol.sum()
                 self.dh.flagcounts["FLAG"] += self._flagcol_sum
@@ -717,18 +717,18 @@ def load(self, load_model=True):
                 inactive = np.zeros(nrows0, bool)
             num_inactive = inactive.sum()
             if num_inactive:
-                print>> log(0), " applying a solvable subset deselects {} rows".format(num_inactive)
+                print(" applying a solvable subset deselects {} rows".format(num_inactive), file=log(0))
             # apply baseline selection
             if self.dh.min_baseline or self.dh.max_baseline:
                 uv2 = (uvw0[:, 0:2] ** 2).sum(1)
                 inactive[uv2 < self.dh.min_baseline ** 2] = True
                 if self.dh.max_baseline:
                     inactive[uv2 > self.dh.max_baseline ** 2] = True
-                print>> log(0), " applying solvable baseline cutoff deselects {} rows".format(
-                    inactive.sum() - num_inactive)
+                print(" applying solvable baseline cutoff deselects {} rows".format(
+                    inactive.sum() - num_inactive), file=log(0))
             num_inactive = inactive.sum()
             if num_inactive:
-                print>> log(0), " {:.2%} visibilities deselected via specificed subset and/or baseline cutoffs".format(num_inactive / float(inactive.size))
+                print(" {:.2%} visibilities deselected via specified subset and/or baseline cutoffs".format(num_inactive / float(inactive.size)), file=log(0))
                 flag_arr0[inactive] |= FL.SKIPSOL
                 self.dh.flagcounts["DESEL"] += num_inactive*flag_arr0[0].size
@@ -742,9 +742,9 @@ def load(self, load_model=True):
                 self.bflagcol = self.dh.fetchslice("BITFLAG", subset=table_subset)
                 self.bflagcol[:] = np.bitwise_or.reduce(self.bflagcol, axis=2)[:,:,np.newaxis]
-                print>> log(2), " read BITFLAG/BITFLAG_ROW"
+                print(" read BITFLAG/BITFLAG_ROW", file=log(2))
                 # compute stats
-                for flagset, bitmask in self.dh.bitflags.iteritems():
+                for flagset, bitmask in self.dh.bitflags.items():
                     flagged = self.bflagcol & bitmask != 0
                     flagged[self.bflagrow & bitmask != 0, :, :] = True
                     self.dh.flagcounts[flagset] += flagged.sum()
@@ -770,7 +770,7 @@ def load(self, load_model=True):
                 flag_arr0[invalid] |= FL.INVALID
                 self.dh.flagcounts.setdefault("INVALID", 0)
                 self.dh.flagcounts["INVALID"] += ninv
-                print>> log(0,"red"), " {:.2%} input visibilities flagged as invalid (0/inf/nan)".format(ninv / float(flagged.size))
+                print(" {:.2%} input visibilities flagged as invalid (0/inf/nan)".format(ninv / float(flagged.size)), file=log(0,"red"))
 
             # check for invalid weights
             if self.dh.has_weights and load_model:
@@ -781,8 +781,8 @@ def load(self, load_model=True):
                     flag_arr0[invalid] |= FL.INVWGHT
                     self.dh.flagcounts.setdefault("INVWGHT", 0)
                     self.dh.flagcounts["INVWGHT"] += ninv
-                    print>> log(0, "red"), " {:.2%} input visibilities flagged due to inf/nan weights".format(
-                        ninv / float(flagged.size))
+                    print(" {:.2%} input visibilities flagged due to inf/nan weights".format(
+                        ninv / float(flagged.size)), file=log(0, "red"))
                 wnull = (weights0 == 0).all(axis=0) & ~flagged
                 nnull = wnull.sum()
                 if nnull:
                     flagged[wnull] = True
                     flag_arr0[wnull] |= FL.NULLWGHT
                     self.dh.flagcounts.setdefault("NULLWGHT", 0)
                     self.dh.flagcounts["NULLWGHT"] += nnull
-                    print>> log(0, "red"), " {:.2%} input visibilities flagged due to null weights".format(
-                        nnull / float(flagged.size))
+                    print(" {:.2%} input visibilities flagged due to null weights".format(
+                        nnull /
float(flagged.size)), file=log(0, "red")) nfl = flagged.sum() self.dh.flagcounts["IN"] += nfl - print>> log, " {:.2%} input visibilities flagged and/or deselected".format(nfl / float(flagged.size)) + print(" {:.2%} input visibilities flagged and/or deselected".format(nfl / float(flagged.size)), file=log) # now rebin arrays if appropriate if self.dh.do_freq_rebin or self.dh.do_time_rebin: nrows = abs(subset.rebin_row_map[-1])+1 nchan = subset.rebin_chan_map[-1]+1 - print>> log(0), " rebinning into {} rows and {} channels".format(nrows, nchan) + print(" rebinning into {} rows and {} channels".format(nrows, nchan), file=log(0)) import cubical.kernels rebinning = cubical.kernels.import_kernel("rebinning") @@ -857,21 +857,21 @@ def load(self, load_model=True): subtract_str = " (-)" if subtract else "" # see if data for this model is already loaded if model_source in loaded_models: - print>> log(0), " reusing {}{} for model {} direction {}{}".format(model_source, + print(" reusing {}{} for model {} direction {}{}".format(model_source, "" if not cluster else ( "()" if cluster == 'die' else "({})".format( cluster)), - imod, idir, subtract_str) + imod, idir, subtract_str), file=log(0)) model = loaded_models[model_source][cluster] # cluster of None signifies that this is a visibility column elif cluster is None: if model_source is 1: - print>> log(0), " using 1.+0j for model {} direction {}{}".format(model_source, - imod, idir, subtract_str) + print(" using 1.+0j for model {} direction {}{}".format(model_source, + imod, idir, subtract_str), file=log(0)) model = np.ones_like(obvis) else: - print>> log(0), " reading {} for model {} direction {}{}".format(model_source, imod, - idir, subtract_str) + print(" reading {} for model {} direction {}{}".format(model_source, imod, + idir, subtract_str), file=log(0)) model0 = self.dh.fetchslice(model_source, subset=table_subset) # sanity check (I've seen nulls coming out of wsclean...) 
invmodel = (~np.isfinite(model0)) @@ -879,8 +879,8 @@ def load(self, load_model=True): invmodel &= (flag_arr0==0) num_inv = invmodel.sum() if num_inv: - print>>log(0,"red"), " {} ({:.2%}) model visibilities flagged as 0/inf/nan".format( - num_inv, num_inv / float(invmodel.size)) + print(" {} ({:.2%}) model visibilities flagged as 0/inf/nan".format( + num_inv, num_inv / float(invmodel.size)), file=log(0,"red")) flag_arr0[invmodel] |= FL.INVMODEL # now rebin (or conjugate) if self.dh.do_freq_rebin or self.dh.do_time_rebin: @@ -921,8 +921,8 @@ def load(self, load_model=True): flag_arr[invmodel] |= FL.INVMODEL self.dh.flagcounts.setdefault("INVMODEL", 0) self.dh.flagcounts["INVMODEL"] += ninv*rebin_factor - print>> log(0, "red"), " {} ({:.2%}) visibilities flagged due to 0/inf/nan model".format( - ninv, ninv / float(flagged.size)) + print(" {} ({:.2%}) visibilities flagged due to 0/inf/nan model".format( + ninv, ninv / float(flagged.size)), file=log(0, "red")) # release memory (gc.collect() particularly important), as model visibilities are *THE* major user (especially # in the DD case) @@ -1107,7 +1107,7 @@ def iterate_solution_chunks(self): data = shared_dict.attach(self._data_dict_name) soldict = data['solutions'] - for key in soldict.iterkeys(): + for key in soldict.keys(): yield soldict[key] def save(self, final=False): @@ -1125,12 +1125,12 @@ def save(self, final=False): for subset in self._subsets: if subset.label is None: - print>> log(0, "blue"), "{}: saving MS rows {}~{}".format(self.label, self.first_row0, self.last_row0) + print("{}: saving MS rows {}~{}".format(self.label, self.first_row0, self.last_row0), file=log(0, "blue")) data = data0 else: - print>> log(0, "blue"), "{}: saving MS rows {}~{}, {} ({} rows)".format(self.label, self.first_row0, + print("{}: saving MS rows {}~{}, {} ({} rows)".format(self.label, self.first_row0, self.last_row0, subset.label, - len(subset.rows0)) + len(subset.rows0)), file=log(0, "blue")) data = shared_dict.attach(subset.datadict) # insert output columns, if needed, and reopen MS if they were actually added @@ -1153,7 +1153,7 @@ def save(self, final=False): # covis = self.dh.parallactic_machine.derotate(subset.time_col, covis, subset.antea, subset.anteb, # angles=subset._angles) covis = subset.upsample(covis) - print>> log, " writing {} column".format(self.dh.output_column) + print(" writing {} column".format(self.dh.output_column), file=log) self.dh.putslice(self.dh.output_column, covis, subset=table_subset) subset._angles = None @@ -1166,13 +1166,13 @@ def save(self, final=False): else: model = model.sum(axis=0) model = subset.upsample(model) - print>> log, " writing {} column".format(self.dh.output_model_column) + print(" writing {} column".format(self.dh.output_model_column), file=log) self.dh.putslice(self.dh.output_model_column, model, subset=table_subset) #writing outputs weights if any if self.dh.output_weight_column and data0['updated'][2]: outweights = subset.upsample(data['outweights']) - print>> log, " writing {} weight column".format(self.dh.output_weight_column) + print(" writing {} weight column".format(self.dh.output_weight_column), file=log) self.dh.putslice(self.dh.output_weight_column, outweights, subset=table_subset) # write flags if (a) solver has generated flags, and we're saving them, (b) always, if auto-filling BITFLAG column @@ -1195,22 +1195,22 @@ def save(self, final=False): self.bflagcol[newflags] |= self.dh._save_bitflag bflag_col = True if self.dh._save_flags: - print>> log, " {:.2%} visibilities flagged by solver: 
saving to BITFLAG and FLAG columns".format(ratio) + print(" {:.2%} visibilities flagged by solver: saving to BITFLAG and FLAG columns".format(ratio), file=log) flag_col = self.bflagcol != 0 else: - print>> log, " {:.2%} visibilities flagged by solver: saving to BITFLAG column only".format(ratio) + print(" {:.2%} visibilities flagged by solver: saving to BITFLAG column only".format(ratio), file=log) # else write to FLAG/FLAG_ROW only, if asked to elif self.dh._save_flags: - print>> log, " {:.2%} visibilities flagged by solver: saving to FLAG column".format(ratio) + print(" {:.2%} visibilities flagged by solver: saving to FLAG column".format(ratio), file=log) self._flagcol[newflags] = True flag_col = self._flagcol # else just message else: - print>> log, " {:.2%} visibilities flagged by solver, but we're not saving flags".format(ratio) + print(" {:.2%} visibilities flagged by solver, but we're not saving flags".format(ratio), file=log) else: - print>> log, " no new flags were generated" + print(" no new flags were generated", file=log) if self.dh._save_flags_apply: prior_flags = subset.upsample((data['flags'] & FL.PRIOR) != 0) @@ -1219,7 +1219,7 @@ def save(self, final=False): else: flag_col |= prior_flags ratio = prior_flags.sum() / float(prior_flags.size) - print>> log, " also transferring {:.2%} input flags (--flags-save-legacy apply)".format(ratio) + print(" also transferring {:.2%} input flags (--flags-save-legacy apply)".format(ratio), file=log) # now figure out what to write # this is set if BITFLAG/BITFLAG_ROW is to be written out @@ -1227,11 +1227,11 @@ def save(self, final=False): self.dh.putslice("BITFLAG", self.bflagcol, subset=table_subset) totflags = (self.bflagcol != 0).sum() self.dh.flagcounts['OUT'] += totflags - print>> log, " updated BITFLAG column ({:.2%} visibilities flagged)".format(totflags / float(self.bflagcol.size)) + print(" updated BITFLAG column ({:.2%} visibilities flagged)".format(totflags / float(self.bflagcol.size)), file=log) self.bflagrow = np.bitwise_and.reduce(self.bflagcol, axis=(-1, -2)) table_subset.putcol("BITFLAG_ROW", self.bflagrow) - print>> log, " updated BITFLAG_ROW column ({:.2%} rows flagged)".format( - (self.bflagrow!=0).sum()/float(self.bflagrow.size)) + print(" updated BITFLAG_ROW column ({:.2%} rows flagged)".format( + (self.bflagrow!=0).sum()/float(self.bflagrow.size)), file=log) #prevents memory leak by clearing self.bflagcol = self.bflagrow = None @@ -1242,10 +1242,10 @@ def save(self, final=False): totflags = flag_col.sum() if bflag_col is None: # only count if not counted above self.dh.flagcounts['OUT'] += totflags - print>> log, " updated FLAG column ({:.2%} visibilities flagged)".format(totflags / float(flag_col.size)) + print(" updated FLAG column ({:.2%} visibilities flagged)".format(totflags / float(flag_col.size)), file=log) flag_row = flag_col.all(axis=(-1, -2)) table_subset.putcol("FLAG_ROW", flag_row) - print>> log, " updated FLAG_ROW column ({:.2%} rows flagged)".format(flag_row.sum() / float(flag_row.size)) + print(" updated FLAG_ROW column ({:.2%} rows flagged)".format(flag_row.sum() / float(flag_row.size)), file=log) if final: self.dh.finalize() diff --git a/cubical/database/iface_database.py b/cubical/database/iface_database.py index 8703e50a..6d1b80e7 100644 --- a/cubical/database/iface_database.py +++ b/cubical/database/iface_database.py @@ -7,8 +7,7 @@ """ import abc -class iface_database(object): - __metaclass__ = abc.ABCMeta +class iface_database(object, metaclass=abc.ABCMeta): @abc.abstractmethod def __init__(self): 
raise NotImplementedError("To be defined") diff --git a/cubical/database/parameter.py b/cubical/database/parameter.py index 3bbb7db5..2f5f1e20 100644 --- a/cubical/database/parameter.py +++ b/cubical/database/parameter.py @@ -22,7 +22,7 @@ class _Record(object): """ def __init__(self, **kw): - for key, value in kw.iteritems(): + for key, value in kw.items(): setattr(self, key, value) @@ -67,7 +67,7 @@ def __init__(self, name, dtype, axes, interpolation_axes=[], empty=0, metadata=N """ interpolation_axes = interpolation_axes or [] assert (len(interpolation_axes) in [0, 1, 2]) - print>> log(1), "defining parameter '{}' over {}".format(name, ",".join(axes)) + print("defining parameter '{}' over {}".format(name, ",".join(axes)), file=log(1)) self.name, self.dtype, self.axis_labels = name, dtype, axes self.empty, self.metadata = empty, metadata @@ -169,8 +169,8 @@ def _update_shape(self, shape, grid): elif not self.shape[i]: self.shape[i] = shape[i] elif self.shape[i] != shape[i]: - raise ValueError, "axis {} of length {} does not match previously defined length {}".format( - axis, shape[i], self.shape[i]) + raise ValueError("axis {} of length {} does not match previously defined length {}".format( + axis, shape[i], self.shape[i])) def _finalize_shape(self): """ @@ -208,7 +208,7 @@ def _finalize_shape(self): gmax = float(g1.max()) or 1 self._norm_grid[iaxis] = g1 = g1 / gmax self._gminmax[iaxis] = gmin, gmax - print>> log(0), "dimensions of {} are {}".format(self.name, ','.join(map(str, self.shape))) + print("dimensions of {} are {}".format(self.name, ','.join(map(str, self.shape))), file=log(0)) return True def _to_norm(self, iaxis, g): @@ -255,7 +255,7 @@ def _init_arrays(self): np.ones(self.shape, bool), fill_value=self.empty) self._array_slices = {} - print>> log(0), " loading {}, shape {}".format(self.name, 'x'.join(map(str, self.shape))) + print(" loading {}, shape {}".format(self.name, 'x'.join(map(str, self.shape))), file=log(0)) def _paste_slice(self, item): """ @@ -293,12 +293,12 @@ def _finalize_arrays(self): slicers.append((None,)) else: slicer_axes.append(i) - slicers.append(xrange(shape)) + slicers.append(range(shape)) self._interpolators = {} # get grid over interpolatable axes - print>> log(2), "decomposing {} into slices".format(self.name) + print("decomposing {} into slices".format(self.name), file=log(2)) # loop over all not-interpolatable slices (e.g. direction, antenna, correlation) for slicer in itertools.product(*slicers): array_slicer = tuple([slice(None) if sl is None else sl for sl in slicer]) @@ -311,8 +311,8 @@ def _finalize_arrays(self): subset = [slice(None) for _ in interpol_axes] if flags is not np.ma.nomask: # now, for every axis in the slice, cut out fully flagged points - allaxis = set(xrange(array.ndim)) - for iaxis in xrange(array.ndim): + allaxis = set(range(array.ndim)) + for iaxis in range(array.ndim): # find points on this axis which are fully flagged along other axes if array.ndim == 1: allflag = flags @@ -320,14 +320,14 @@ def _finalize_arrays(self): allflag = flags.all(axis=tuple(allaxis - {iaxis})) # all flagged? 
Indicate this by array=None if allflag.all(): - print>> log(2), " slice {} fully flagged".format(slicer) + print(" slice {} fully flagged".format(slicer), file=log(2)) array = None break # if such points exist, extract subset of array and grid elif allflag.any(): - print>> log(2), " slice {} flagged at {} {} points".format(slicer, allflag.sum(), + print(" slice {} flagged at {} {} points".format(slicer, allflag.sum(), self.axis_labels[ - interpol_axes[iaxis]]) + interpol_axes[iaxis]]), file=log(2)) # make corresponding slice array_slice = [slice(None)] * array.ndim # also set subset to the mask of the valid points @@ -491,15 +491,15 @@ def reinterpolate(self, **grid): # create output array of corresponding shape output_array = np.full(output_shape, self.empty, self.dtype) - print>> log(1), "will interpolate {} solutions onto {} grid".format(self.name, - "x".join(map(str, output_shape))) + print("will interpolate {} solutions onto {} grid".format(self.name, + "x".join(map(str, output_shape))), file=log(1)) # now loop over all slices for slicer, out_slicer in zip(itertools.product(*input_slicers), itertools.product(*output_slicers)): # arse is the current array slice we work with arse = self._array_slices[slicer] if arse.array is None: - print>> log(2), " slice {} fully flagged".format(slicer) + print(" slice {} fully flagged".format(slicer), file=log(2)) else: # Check which subset of the slice needs to be interpolated # We build up the following lists describing the interpolation process @@ -550,8 +550,8 @@ def reinterpolate(self, **grid): if not interpolator or len(input_grid_segment0) != len(input_grid_segment) or \ not all([ia == ja and i0 <= j0 and i1 >= j1 for (ia, i0, i1), (ja, j0, j1) in zip(input_grid_segment0, input_grid_segment)]): - print>> log(2), " slice {} preparing {}D interpolator for {}".format(slicer, - len(segment_grid), ",".join(["{}:{}".format(*seg[1:]) for seg in input_grid_segment])) + print(" slice {} preparing {}D interpolator for {}".format(slicer, + len(segment_grid), ",".join(["{}:{}".format(*seg[1:]) for seg in input_grid_segment])), file=log(2)) # arav: linear array of all values, adata: all unflagged values arav = arse.array[tuple(array_segment_slice)].ravel() adata = arav.data[~arav.mask] if arav.mask is not np.ma.nomask else arav.data @@ -597,15 +597,15 @@ def reinterpolate(self, **grid): coords = np.array([x.ravel() for x in np.meshgrid(*output_coord, indexing='ij')]) # call interpolator. 
Reshape into output slice shape result = interpolator(coords.T).reshape(interp_shape) - print>> log(2), " interpolated onto {} grid".format("x".join(map(str, interp_shape))) + print(" interpolated onto {} grid".format("x".join(map(str, interp_shape))), file=log(2)) output_array[out_slicer] = result[tuple(interp_broadcast)] # return array, throwing out unneeded axes output_array = output_array[tuple(output_reduction)] # also, mask missing values from the interpolator with the fill value missing = np.isnan(output_array) output_array[missing] = self.empty - print>> log(1), "{} solutions: interpolation results in {}/{} missing values".format(self.name, - missing.sum(), missing.size) + print("{} solutions: interpolation results in {}/{} missing values".format(self.name, + missing.sum(), missing.size), file=log(1)) return masked_array(output_array, missing, fill_value=self.empty) @@ -635,8 +635,8 @@ def lookup(self, **grid): output_array = np.full(output_shape, self.empty, self.dtype) output_mask = np.ones(output_shape, bool) - print>> log(1), "will lookup {} solutions on {} grid".format(self.name, - "x".join(map(str, output_shape))) + print("will lookup {} solutions on {} grid".format(self.name, + "x".join(map(str, output_shape))), file=log(1)) # now loop over all slices @@ -644,7 +644,7 @@ def lookup(self, **grid): # arse is the current array slice we work with arse = self._array_slices[slicer] if arse.array is None: - print>> log(2), " slice {} fully flagged".format(slicer) + print(" slice {} fully flagged".format(slicer), file=log(2)) else: # segment_grid: float array of normalized coordinates corresponding # to segment being interpolated over @@ -660,8 +660,8 @@ def lookup(self, **grid): ij = [ (i, gmap.get(x)) for i,x in enumerate(outgr) ] input_indices.append([ j for i,j in ij if j is not None]) output_indices.append([ i for i,j in ij if j is not None]) - print>> log(2), " slice {}: looking up {} valid points".format(slicer, - "x".join([str(len(idx)) for idx in input_indices])) + print(" slice {}: looking up {} valid points".format(slicer, + "x".join([str(len(idx)) for idx in input_indices])), file=log(2)) out = output_array[out_slicer] outmask = output_mask[out_slicer] @@ -677,8 +677,8 @@ def lookup(self, **grid): output_mask = output_mask[tuple(output_reduction)] output_array[output_mask] = self.empty - print>> log(1), "{} solutions: interpolation results in {}/{} missing values".format(self.name, - output_mask.sum(), output_mask.size) + print("{} solutions: interpolation results in {}/{} missing values".format(self.name, + output_mask.sum(), output_mask.size), file=log(1)) return masked_array(output_array, output_mask, fill_value=self.empty) @@ -696,7 +696,7 @@ def match_grids(self, **grid): True if all coordinate values match the parameter grid. False if at least one doesn't. """ - for axis, gridvalues in grid.iteritems(): + for axis, gridvalues in grid.items(): iaxis = self.axis_index[axis] if not set(gridvalues).issubset(self._grid_set[iaxis]): return False diff --git a/cubical/database/pickled_db.py b/cubical/database/pickled_db.py index 03087457..c1414759 100644 --- a/cubical/database/pickled_db.py +++ b/cubical/database/pickled_db.py @@ -6,7 +6,7 @@ Handles parameter databases which can contain solutions and other relevant values. 
""" -import cPickle, os, os.path +import pickle, os, os.path import numpy as np import traceback from cubical.tools import logger, ModColor @@ -15,7 +15,7 @@ from collections import OrderedDict, Iterator from cubical.database.parameter import Parameter, _Record -from iface_database import iface_database +from .iface_database import iface_database class _ParmSegment(_Record): """ A ParmSegment is just a Record -- we just want it to be a special type so that it @@ -52,11 +52,11 @@ def _create(self, filename, metadata={}, backup=True, **kw): self.metadata = OrderedDict(mode=self.MODE_FRAGMENTED, time=time.time(), **metadata) # we'll write to a temp file, and do a backup on successful closure self._fobj = open(filename + ".tmp", 'w') - cPickle.dump(self.metadata, self._fobj) + pickle.dump(self.metadata, self._fobj) self._fobj.flush() self._parameters = {} self._parm_written = set() - print>> log(0), "creating {} in {} mode".format(self.filename, self.metadata['mode']) + print("creating {} in {} mode".format(self.filename, self.metadata['mode']), file=log(0)) def define_param(self, *args, **kw): """ @@ -95,13 +95,13 @@ def add_chunk(self, name, array, grid={}): assert (parm is not None) # dump parm to DB the first time a slice shows up if name not in self._parm_written: - cPickle.dump(parm, self._fobj, 2) + pickle.dump(parm, self._fobj, 2) self._parm_written.add(name) # update axis shapes and grids based on slice parm._update_shape(array.shape, grid) # dump slice to DB item = _ParmSegment(name=name, array=np.ma.asarray(array), grid=grid) - cPickle.dump(item, self._fobj, 2) + pickle.dump(item, self._fobj, 2) def close(self): """ Closes the database. """ @@ -118,13 +118,13 @@ def close(self): def _save_desc(self): """ Helper function. Writes accumulated parameter descriptions to filename.desc. 
""" - for desc in self._parameters.itervalues(): + for desc in self._parameters.values(): desc._finalize_shape() - for key in self._parameters.keys(): + for key in list(self._parameters.keys()): if not self._parameters[key]._populated: del self._parameters[key] - cPickle.dump(self._parameters, open(self.filename + ".skel", 'w'), 2) - print>> log(0), "saved updated parameter skeletons to {}".format(self.filename + ".skel") + pickle.dump(self._parameters, open(self.filename + ".skel", 'w'), 2) + print("saved updated parameter skeletons to {}".format(self.filename + ".skel"), file=log(0)) def _backup_and_rename(self, backup): """ @@ -138,15 +138,15 @@ def _backup_and_rename(self, backup): if backup: backup_filename = os.path.join(os.path.dirname(self.filename), "~" + os.path.basename(self.filename)) - print>> log(0), "previous DB will be backed up as " + backup_filename + print("previous DB will be backed up as " + backup_filename, file=log(0)) if os.path.exists(backup_filename): - print>> log(0), " removing old backup " + backup_filename + print(" removing old backup " + backup_filename, file=log(0)) os.unlink(backup_filename) os.rename(self.filename, backup_filename) else: os.unlink(self.filename) os.rename(self.filename + ".tmp", self.filename) - print>> log(0), "wrote {} in {} mode".format(self.filename, self.metadata['mode']) + print("wrote {} in {} mode".format(self.filename, self.metadata['mode']), file=log(0)) def save(self, filename=None, backup=True): """ @@ -162,10 +162,10 @@ def save(self, filename=None, backup=True): self.metadata['mode'] = self.MODE_CONSOLIDATED filename = filename or self.filename with open(filename + ".tmp", 'w') as fobj: - cPickle.dump(self.metadata, fobj, 2) - for parm in self._parameters.itervalues(): + pickle.dump(self.metadata, fobj, 2) + for parm in self._parameters.values(): parm.release_cache() - cPickle.dump(self._parameters, fobj, 2) + pickle.dump(self._parameters, fobj, 2) # successfully written? 
Backup and rename self.filename = filename self._backup_and_rename(backup) @@ -176,14 +176,14 @@ def save(self, filename=None, backup=True): class _Unpickler(Iterator): def __init__(self, filename): self.fobj = open(filename) - self.metadata = cPickle.load(self.fobj) + self.metadata = pickle.load(self.fobj) if type(self.metadata) is not OrderedDict or not "mode" in self.metadata: raise IOError("{}: invalid metadata entry".format(filename)) self.mode = self.metadata['mode'] - def next(self): + def __next__(self): try: - return cPickle.load(self.fobj) + return pickle.load(self.fobj) except EOFError: raise StopIteration @@ -201,19 +201,19 @@ def _load(self, filename): self.filename = filename db = self._Unpickler(filename) - print>> log(0), "reading {} in {} mode".format(self.filename, db.mode) + print("reading {} in {} mode".format(self.filename, db.mode), file=log(0)) self.metadata = db.metadata - for key, value in self.metadata.iteritems(): + for key, value in self.metadata.items(): if key != "mode": - print>> log(1), " metadata '{}': {}".format(key, value) + print(" metadata '{}': {}".format(key, value), file=log(1)) # now load differently depending on mode # in consolidated mode, just unpickle the parameter objects if db.mode == PickledDatabase.MODE_CONSOLIDATED: - self._parameters = db.next() - for parm in self._parameters.itervalues(): - print>> log(1), " read {} of shape {}".format(parm.name, - 'x'.join(map(str, parm.shape))) + self._parameters = next(db) + for parm in self._parameters.values(): + print(" read {} of shape {}".format(parm.name, + 'x'.join(map(str, parm.shape))), file=log(1)) return # otherwise we're in fragmented mode @@ -224,17 +224,17 @@ def _load(self, filename): descfile = filename + '.skel' self._parameters = None if not os.path.exists(descfile): - print>> log(0), ModColor.Str("{} does not exist, will try to rebuild".format(descfile)) + print(ModColor.Str("{} does not exist, will try to rebuild".format(descfile)), file=log(0)) elif os.path.getmtime(descfile) < os.path.getmtime(self.filename): - print>> log(0), ModColor.Str("{} older than database: will try to rebuild".format(descfile)) + print(ModColor.Str("{} older than database: will try to rebuild".format(descfile)), file=log(0)) elif os.path.getmtime(descfile) < os.path.getmtime(__file__): - print>> log(0), ModColor.Str("{} older than this code: will try to rebuild".format(descfile)) + print(ModColor.Str("{} older than this code: will try to rebuild".format(descfile)), file=log(0)) else: try: - self._parameters = cPickle.load(open(descfile, 'r')) + self._parameters = pickle.load(open(descfile, 'r')) except: traceback.print_exc() - print>> log(0), ModColor.Str("error loading {}, will try to rebuild".format(descfile)) + print(ModColor.Str("error loading {}, will try to rebuild".format(descfile)), file=log(0)) # rebuild the skeletons, if they weren't loaded if self._parameters is None: self._parameters = {} @@ -248,7 +248,7 @@ def _load(self, filename): self._save_desc() # initialize arrays - for parm in self._parameters.itervalues(): + for parm in self._parameters.values(): parm._init_arrays() # go over all slices to paste them into the arrays @@ -259,19 +259,19 @@ def _load(self, filename): elif type(item) is _ParmSegment: parm = self._parameters.get(item.name) if parm is None: - raise IOError, "{}: no parm found for {}'".format(filename, item.name) + raise IOError("{}: no parm found for {}'".format(filename, item.name)) parm._paste_slice(item) else: raise IOError("{}: unknown item type '{}'".format(filename, 
type(item))) # ok, now arrays and flags each contain a full-sized array. Break it up into slices. - for parm in self._parameters.itervalues(): + for parm in self._parameters.values(): parm._finalize_arrays() def names(self): """ Returns names of all defined parameters. """ - return self._parameters.keys() + return list(self._parameters.keys()) def __contains__(self, name): return name in self._parameters diff --git a/cubical/flagging.py b/cubical/flagging.py index 90a83f12..3b8028cf 100644 --- a/cubical/flagging.py +++ b/cubical/flagging.py @@ -43,7 +43,7 @@ class FL(object): def categories(): """ Returns dict of all possible flag categories. """ - return OrderedDict([(attr, value) for attr, value in FL.__dict__.iteritems() + return OrderedDict([(attr, value) for attr, value in FL.__dict__.items() if attr[0] != "_" and type(value) is FL.dtype]) class Flagsets (object): @@ -74,19 +74,19 @@ def __init__ (self,ms): if isinstance(bit,int): self.bits[name] = bit else: - print "Warning: unexpected type (%s) for %s keyword of BITFLAG column," \ - " ignoring"%(type(bit),kw) + print("Warning: unexpected type (%s) for %s keyword of BITFLAG column," \ + " ignoring"%(type(bit),kw)) # have we found any FLAGSET_ specs? if self.bits: order = 'FLAGSETS' in kws and ms.getcolkeyword('BITFLAG','FLAGSETS') if isinstance(order,str): order = order.split(',') else: - print "Warning: unexpected type (%s) for FLAGSETS keyword of BITFLAG column," \ - " ignoring"%type(order) + print("Warning: unexpected type (%s) for FLAGSETS keyword of BITFLAG column," \ + " ignoring"%type(order)) order = [] # form up "natural" order by comparing bitmasks - bitwise_order = list(self.bits.iterkeys()) + bitwise_order = list(self.bits.keys()) bitwise_order.sort(lambda a,b:cmp(self.bits[a],self.bits[b])) # if an order is specified, make sure it is actually valid, # and add any elements from bitwise_order that are not present @@ -100,14 +100,14 @@ def __init__ (self,ms): elif 'NAMES' in kws: names = ms.getcolkeyword('BITFLAG','NAMES') if isinstance(names,(list,tuple)): - self.order = map(str,names) + self.order = list(map(str,names)) bit = 1 for name in self.order: self.bits[name] = bit bit <<= 1 if ms.iswritable(): ms._putkeyword('BITFLAG','FLAGSETS',-1,False,','.join(self.order)) - for name,bit in self.bits.iteritems(): + for name,bit in self.bits.items(): ms._putkeyword('BITFLAG','FLAGSET_%s'%name,-1,False,bit) ms.flush() else: @@ -146,18 +146,18 @@ def flagmask (self,name,create=False): # lookup flagbit, return if found if self.order is None: - raise TypeError,"MS does not contain a BITFLAG column. Please run the addbitflagcol" \ - " utility on this MS." + raise TypeError("MS does not contain a BITFLAG column. 
Please run the addbitflagcol" \ + " utility on this MS.") bit = self.bits.get(name,None) if bit is not None: return bit # raise exception if not allowed to create a new one if not create: - raise ValueError,"Flagset '%s' not found"%name + raise ValueError("Flagset '%s' not found"%name) # find empty bit for bitnum in range(32): bit = 1<> log, ModColor.Str("no valid solutions anywhere: skipping post-solution flagging.") + print(ModColor.Str("no valid solutions anywhere: skipping post-solution flagging."), file=log) return None chi2n = st.timechan.chi2n @@ -234,8 +234,8 @@ def flag_chisq (st, GD, basename, nddid): median = np.ma.median(chi2) median_np = np.ma.median(chi2n) - print>>log, "median chi2 value is {:.3} from {} valid t/f slots".format(median, total) - print>>log, "median count per slot is {}".format(median_np) + print("median chi2 value is {:.3} from {} valid t/f slots".format(median, total), file=log) + print("median count per slot is {}".format(median_np), file=log) chi_median_thresh = GD["postmortem"]["tf-chisq-median"] np_median_thresh = GD["postmortem"]["tf-np-median"] @@ -263,8 +263,8 @@ def flag_chisq (st, GD, basename, nddid): flag = (chi2 > chi_median_thresh * median) chi2[flag] = np.ma.masked nflag = flag.sum() - print>>log, "{} slots ({:.2%}) flagged on chi2 > {}*median".format(nflag, nflag/float(total), - chi_median_thresh) + print("{} slots ({:.2%}) flagged on chi2 > {}*median".format(nflag, nflag/float(total), + chi_median_thresh), file=log) if make_plots: pylab.subplot(163) @@ -275,8 +275,8 @@ def flag_chisq (st, GD, basename, nddid): flag2 = (chi2n < np_median_thresh * median_np) n_new = (flag2&~flag).sum() - print>>log, "{} more slots ({:.2%}) flagged on counts < {}*median".format(n_new, - n_new/float(total), np_median_thresh) + print("{} more slots ({:.2%}) flagged on counts < {}*median".format(n_new, + n_new/float(total), np_median_thresh), file=log) flag |= flag2 chi2[flag] = np.ma.masked @@ -293,13 +293,13 @@ def flag_chisq (st, GD, basename, nddid): freqcount = flag.sum(axis=0) freqflags = freqcount > nt * chan_density n_new = (freqflags&~(freqcount==nt)).sum() - print>>log, "{} more channels flagged on density > {}".format(n_new, chan_density) + print("{} more channels flagged on density > {}".format(n_new, chan_density), file=log) # flag timeslots with overdense flagging timecount = flag.sum(axis=1) timeflags = timecount > nf * time_density n_new = (timeflags&~(timecount==nf)).sum() - print>>log, "{} more timeslots flagged on density > {}".format(n_new, time_density) + print("{} more timeslots flagged on density > {}".format(n_new, time_density), file=log) flag = flag | freqflags[np.newaxis,:] | timeflags[:,np.newaxis] chi2[flag] = np.ma.masked @@ -318,7 +318,7 @@ def flag_chisq (st, GD, basename, nddid): ddidcounts = flag3.sum(axis=(0, 2)) ddidflags = ddidcounts > maxcount * ddid_density n_new = (ddidflags&~(ddidcounts==maxcount)).sum() - print>>log, "{} more ddids flagged on density > {}".format(n_new, ddid_density) + print("{} more ddids flagged on density > {}".format(n_new, ddid_density), file=log) flag3 |= ddidflags[np.newaxis, :, np.newaxis] chi2[flag] = np.ma.masked @@ -330,7 +330,7 @@ def flag_chisq (st, GD, basename, nddid): pylab.colorbar() filename = basename+".chiflag.png" pylab.savefig(filename, DPI=plots.DPI) - print>> log, "saved chi-sq flagging plot to "+filename + print("saved chi-sq flagging plot to "+filename, file=log) if show_plots: pylab.show() diff --git a/cubical/kernels/__init__.py b/cubical/kernels/__init__.py index 
03fc4d74..e41ec599 100644 --- a/cubical/kernels/__init__.py +++ b/cubical/kernels/__init__.py @@ -89,7 +89,7 @@ def import_kernel(name): except ImportError: if not name.endswith("_omp"): raise - print("import_kernel({}): failed, trying fallback".format(name)) + print(("import_kernel({}): failed, trying fallback".format(name))) finally: _omp_import.pop() diff --git a/cubical/machines/abstract_machine.py b/cubical/machines/abstract_machine.py index ef7d5808..4bf8ed98 100644 --- a/cubical/machines/abstract_machine.py +++ b/cubical/machines/abstract_machine.py @@ -15,7 +15,7 @@ log = logger.getLogger("gain_machine") -class MasterMachine(object): +class MasterMachine(object, metaclass=ABCMeta): """ This is a base class for all solution machines. It is completely generic and lays out the basic requirements for all machines. @@ -24,8 +24,6 @@ class MasterMachine(object): solution tables on disk. """ - __metaclass__ = ABCMeta - def __init__(self, jones_label, data_arr, ndir, nmod, times, freqs, chunk_label, options, diagonal=None): """ Initializes a gain machine. @@ -609,26 +607,26 @@ def _load_solutions(self, init_sols): """ sols = {} # collect importable solutions from DB, interpolate - for label, grids in self.importable_solutions().iteritems(): + for label, grids in self.importable_solutions().items(): db, prefix, interpolate = init_sols.get(self.jones_label, (None, None, False)) name = "{}:{}".format(prefix, label) if db is not None: if name in db: if interpolate: - print>>log,"{}: interpolating {} from {}".format(self.chunk_label, name, db.filename) + print("{}: interpolating {} from {}".format(self.chunk_label, name, db.filename), file=log) sols[label] = sol = db[name].reinterpolate(**grids) else: if not db[name].match_grids(**grids): raise ValueError("{} does not define {} on the correct grid. 
Consider using " "-xfer-from rather than -load-from".format(name, db.filename)) - print>> log, "{}: loading {} from {}".format(self.chunk_label, name, db.filename) + print("{}: loading {} from {}".format(self.chunk_label, name, db.filename), file=log) sols[label] = sol = db[name].lookup(**grids) if sol.count() != sol.size: - print>>log, "{}: {:.2%} valid {} slots populated".format( - self.chunk_label, sol.count()/float(sol.size), name) + print("{}: {:.2%} valid {} slots populated".format( + self.chunk_label, sol.count()/float(sol.size), name), file=log) db[name].release_cache() else: - print>>log,"{}: {} not in {}".format(self.chunk_label, name, db.filename) + print("{}: {} not in {}".format(self.chunk_label, name, db.filename), file=log) # if anything at all was loaded from DB, import if sols: self.import_solutions(sols) @@ -741,7 +739,7 @@ def _init_solutions(self, label, load_from, interpolate, save_to, exportables): """ # init solutions from database if load_from: - print>>log(0, "blue"), "{} solutions will be initialized from {}".format(label, load_from) + print("{} solutions will be initialized from {}".format(label, load_from), file=log(0, "blue")) if "//" in load_from: filename, prefix = load_from.rsplit("//", 1) else: @@ -750,9 +748,9 @@ def _init_solutions(self, label, load_from, interpolate, save_to, exportables): # create database to save to if save_to: # define parameters in DB - for sol_label, (empty_value, axes) in exportables.iteritems(): + for sol_label, (empty_value, axes) in exportables.items(): self.define_param(save_to, "{}:{}".format(label, sol_label), empty_value, axes) - print>> log(0), "{} solutions will be saved to {}".format(label, save_to) + print("{} solutions will be saved to {}".format(label, save_to), file=log(0)) def define_param(self, save_to, name, empty_value, axes, interpolation_axes=("time", "freq")): @@ -822,7 +820,7 @@ def export_solutions(self, gm, subdict): # has the gain machine added a prefix to the names already (as the chain machine does) is_prefixed = sols.pop('prefixed', False) # populate values subdictionary - for label, (value, grid) in sols.iteritems(): + for label, (value, grid) in sols.items(): name = label if is_prefixed else "{}:{}".format(gm.jones_label, label) subdict[name] = value.data subdict["{}:grid__".format(name)] = grid @@ -844,19 +842,19 @@ def save_solutions(self, subdict): Shared dictionary to be saved. This is presumed to be populated by export_solutions() above. """ # add slices for all parameters - for name in subdict.iterkeys(): + for name in subdict.keys(): if not name.endswith("__") and name in self._save_sols: sd = subdict["{}:grid__".format(name)] - grids = {key: sd[key] for key in sd.iterkeys()} + grids = {key: sd[key] for key in sd.keys()} self.get_solution_db(name).add_chunk(name, masked_array(subdict[name], subdict[name+":flags__"]), grids) def close(self): """ Closes all solution databases and releases various caches. 
""" - for db, prefix, _ in self._init_sols.values(): + for db, prefix, _ in list(self._init_sols.values()): db.close() - for db in self._save_sols_byname.values(): + for db in list(self._save_sols_byname.values()): db.close() self._init_sols = {} self._save_sols = {} @@ -886,7 +884,7 @@ def set_metas(self, src): Args: src: instance of cubical.data_handler """ - for db in self._save_sols_byname.values(): + for db in list(self._save_sols_byname.values()): db.export_CASA_gaintable = self.global_options["out"].get("casa-gaintables", True) db.set_metadata(src) diff --git a/cubical/machines/complex_2x2_machine.py b/cubical/machines/complex_2x2_machine.py index 4dd86925..d6dc5c10 100644 --- a/cubical/machines/complex_2x2_machine.py +++ b/cubical/machines/complex_2x2_machine.py @@ -73,7 +73,7 @@ def precompute_attributes(self, data_arr, model_arr, flags_arr, noise): pzd = np.angle(dm_sum/dabs_sum) pzd[dabs_sum==0] = 0 - print>>log(2),"{}: PZD estimate {}".format(self.chunk_label, pzd) + print("{}: PZD estimate {}".format(self.chunk_label, pzd), file=log(2)) self.gains[:,:,:,:,1,1] = np.exp(-1j*pzd)[np.newaxis,:,:,np.newaxis] diff --git a/cubical/machines/complex_W_2x2_machine.py b/cubical/machines/complex_W_2x2_machine.py index 5e7f4e92..f857e198 100644 --- a/cubical/machines/complex_W_2x2_machine.py +++ b/cubical/machines/complex_W_2x2_machine.py @@ -256,7 +256,7 @@ def _brute_solve_v(wn): root = vvals[np.argmin(np.abs(fvals))] if self.iters % 5 == 0 or self.iters == 1: - print>> log(2), "{} : {} iters: v-parameter is {}".format(self.label, self.iters, root) + print("{} : {} iters: v-parameter is {}".format(self.label, self.iters, root), file=log(2)) return root diff --git a/cubical/machines/ifr_gain_machine.py b/cubical/machines/ifr_gain_machine.py index f98fb8ec..3e8530e3 100644 --- a/cubical/machines/ifr_gain_machine.py +++ b/cubical/machines/ifr_gain_machine.py @@ -36,35 +36,35 @@ def __init__(self, gmfactory, ifrgain_opts, compute=True): self._ifrgains_per_chan = ifrgain_opts['per-chan'] self._ifrgain = None self._nfreq = gmfactory.grid["freq"] - nfreq, nant, ncorr = [len(gmfactory.grid[axis]) for axis in "freq", "ant", "corr"] + nfreq, nant, ncorr = [len(gmfactory.grid[axis]) for axis in ("freq", "ant", "corr")] if load_from: filename = load_from - print>> log(0), ModColor.Str("applying baseline-based corrections (BBCs) from {}".format(filename), - col="green") + print(ModColor.Str("applying baseline-based corrections (BBCs) from {}".format(filename), + col="green"), file=log(0)) if "//" in filename: filename, prefix = filename.rsplit("//", 1) else: filename, prefix = filename, "BBC" parm = param_db.load(filename).get(prefix) if parm is None: - print>> log(0), ModColor.Str(" no solutions for '{}' in {}".format(prefix, filename)) + print(ModColor.Str(" no solutions for '{}' in {}".format(prefix, filename)), file=log(0)) else: self._ifrgain = parm.reinterpolate(freq=gmfactory.grid["freq"]).filled() if tuple(self._ifrgain.shape) != (nfreq, nant, nant, ncorr, ncorr): - print>> log(0), ModColor.Str(" invalid BBC shape {}, will ignore".format(self._ifrgain.shape)) + print(ModColor.Str(" invalid BBC shape {}, will ignore".format(self._ifrgain.shape)), file=log(0)) self._ifrgain = None else: - print>> log(0), " loaded per-channel BBCs of shape {}".format(filename, self._ifrgain.shape) + print(" loaded per-channel BBCs of shape {}".format(filename, self._ifrgain.shape), file=log(0)) if not self._ifrgains_per_chan: - print>> log(0), " using one mean value across band" + print(" using one mean value 
across band", file=log(0)) self._ifrgain[np.newaxis,...] = self._ifrgain.mean(axis=0) # reset off-diagonal values, if needed if ifrgain_opts["apply-2x2"]: - print>> log(0), ModColor.Str( - " using full 2x2 BBCs. You'd better know what you're doing!",col="green") + print(ModColor.Str( + " using full 2x2 BBCs. You'd better know what you're doing!",col="green"), file=log(0)) else: self._ifrgain[..., (0, 1), (1, 0)] = 1 - print>>log(0)," using parallel-hand BBCs only" + print(" using parallel-hand BBCs only", file=log(0)) if save_to and compute: self._compute_2x2 = ifrgain_opts["compute-2x2"] # setup axes for IFR-based gains @@ -77,9 +77,9 @@ def __init__(self, gmfactory, ifrgain_opts, compute=True): self._mdh_sum = np.ma.zeros(parm.shape, gmfactory.ctype, fill_value=0) self._ddh_sum = np.ma.zeros(parm.shape, gmfactory.ctype, fill_value=0) # - print>> log(0), "will compute & save suggested baseline-based corrections (BBCs) to {}".format( - self._save_filename) - print>> log(0), " (these can optionally be applied in a subsequent CubiCal run)" + print("will compute & save suggested baseline-based corrections (BBCs) to {}".format( + self._save_filename), file=log(0)) + print(" (these can optionally be applied in a subsequent CubiCal run)", file=log(0)) else: self._ifrgains_grid = None diff --git a/cubical/machines/interval_gain_machine.py b/cubical/machines/interval_gain_machine.py index c5dd2175..f199db47 100644 --- a/cubical/machines/interval_gain_machine.py +++ b/cubical/machines/interval_gain_machine.py @@ -2,7 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details -from __future__ import print_function + import numpy as np from cubical.flagging import FL from cubical.machines.abstract_machine import MasterMachine @@ -69,8 +69,8 @@ def __init__(self, label, data_arr, ndir, nmod, times, frequencies, chunk_label, # n_tim and n_fre are the time and frequency dimensions of the data arrays. # n_timint and n_freint are the time and frequency dimensions of the gains. 
-        self.t_bins = range(0, self.n_tim, self.t_int)
-        self.f_bins = range(0, self.n_fre, self.f_int)
+        self.t_bins = list(range(0, self.n_tim, self.t_int))
+        self.f_bins = list(range(0, self.n_fre, self.f_int))

         self.n_timint = len(self.t_bins)
         self.n_freint = len(self.f_bins)

@@ -699,7 +699,7 @@ def conditioning_status_string(self):
         if self.dd_term:
             string += " {} dirs".format(self.n_dir)
         string += " {}/{} ants, MGE {}".format(anteqs, self.n_ant,
-            " ".join(["{:.3}".format(self.prior_gain_error[idir, :].max()) for idir in xrange(self.n_dir)]))
+            " ".join(["{:.3}".format(self.prior_gain_error[idir, :].max()) for idir in range(self.n_dir)]))
         if self._n_flagged_on_max_error is not None:
             string += ", NFMGE {}".format(" ".join(map(str,self._n_flagged_on_max_error)))

@@ -713,7 +713,7 @@ def flagging_stats_string(self):
         """Returns a string describing per-flagset statistics"""
         fstats = []

-        for flag, mask in FL.categories().iteritems():
+        for flag, mask in FL.categories().items():
             n_flag = ((self.gflags & mask) != 0).sum()
             if n_flag:
                 fstats.append("{}:{}({:.2%})".format(flag, n_flag, n_flag/float(self.gflags.size)))
@@ -738,7 +738,7 @@ def current_convergence_status_string(self):
             string += ", max update {:.4}".format(self.max_update)
             if self.posterior_gain_error is not None:
                 string += ", PGE " + " ".join(["{:.3}".format(self.posterior_gain_error[idir, :].max())
-                                               for idir in xrange(self.n_dir)])
+                                               for idir in range(self.n_dir)])
             return string
         else:
             return "{}: n/s{}".format(self.jones_label, ", loaded" if self._gains_loaded else "")
@@ -758,7 +758,7 @@ def final_convergence_status_string(self):
             string += ", d/fl {:.2%}".format(self.missing_gain_fraction)
         if self.posterior_gain_error is not None:
             string += ", PGE " + " ".join(["{:.3}".format(self.posterior_gain_error[idir, :].max())
-                                           for idir in xrange(self.n_dir)])
+                                           for idir in range(self.n_dir)])
         if self._n_flagged_on_max_posterior_error is not None:
             string += ", NFPGE {}".format(" ".join(map(str,self._n_flagged_on_max_posterior_error)))
         return string
diff --git a/cubical/machines/jones_chain_machine.py b/cubical/machines/jones_chain_machine.py
index 5cb25805..ecd7966e 100644
--- a/cubical/machines/jones_chain_machine.py
+++ b/cubical/machines/jones_chain_machine.py
@@ -9,7 +9,7 @@
 import cubical.kernels
 from cubical.tools import logger
-import machine_types
+from . 
import machine_types from cubical.flagging import FL log = logger.getLogger("jones_chain") @@ -133,7 +133,7 @@ def export_solutions(self): # prefix jones label to solution name for term in self.jones_terms: if term.solvable: - for label, sol in term.export_solutions().iteritems(): + for label, sol in term.export_solutions().items(): soldict["{}:{}".format(term.jones_label, label)] = sol soldict['prefixed'] = True @@ -194,7 +194,7 @@ def compute_js(self, obser_arr, model_arr): self.cached_model_arr = cached_model_arr = np.empty_like(model_arr) np.copyto(cached_model_arr, model_arr) - for ind in xrange(self.n_terms - 1, self.active_index, -1): + for ind in range(self.n_terms - 1, self.active_index, -1): term = self.jones_terms[ind] term.apply_gains(cached_model_arr) @@ -217,7 +217,7 @@ def compute_js(self, obser_arr, model_arr): np.copyto(self.jh, self.cached_model_arr) - for ind in xrange(self.active_index, -1, -1): + for ind in range(self.active_index, -1, -1): term = self.jones_terms[ind] self.cychain.cycompute_jh(self.jh, term.gains, *term.gain_intervals) @@ -232,7 +232,7 @@ def compute_js(self, obser_arr, model_arr): self.active_term.cykernel.cycompute_jhr(self.jh, r, self._jhr, 1, 1) - for ind in xrange(0, self.active_index, 1): + for ind in range(0, self.active_index, 1): term = self.jones_terms[ind] g_inv, gh_inv, flag_counts = term.get_inverse_gains() self.cychain.cyapply_left_inv_jones(self._jhr, g_inv, *term.gain_intervals) @@ -420,17 +420,17 @@ def _next_chain_term(self): if self.active_term.solvable: self.active_term.maxiter = self.term_iters.pop(0) if not self.active_term.maxiter: - print>> log(1), "skipping term {}: 0 term iters specified".format(self.active_term.jones_label) + print("skipping term {}: 0 term iters specified".format(self.active_term.jones_label), file=log(1)) continue self.active_term.iters = 0 self._convergence_states_finalized = False if previous_term: previous_term.has_converged = previous_term.has_stalled = False self.active_term.has_converged = self.active_term.has_stalled = False - print>> log(1), "activating term {}".format(self.active_term.jones_label) + print("activating term {}".format(self.active_term.jones_label), file=log(1)) return True else: - print>> log(1), "skipping term {}: non-solvable".format(self.active_term.jones_label) + print("skipping term {}: non-solvable".format(self.active_term.jones_label), file=log(1)) def next_iteration(self): @@ -444,9 +444,9 @@ def next_iteration(self): major_step = False if self.active_term.has_converged or self.active_term.has_stalled: - print>>log(1),"term {} {} ({} iters): {}".format(self.active_term.jones_label, + print("term {} {} ({} iters): {}".format(self.active_term.jones_label, "converged" if self.active_term.has_converged else "stalled", - self.active_term.iters, self.active_term.final_convergence_status_string) + self.active_term.iters, self.active_term.final_convergence_status_string), file=log(1)) self._convergence_states.append(self.active_term.final_convergence_status_string) self._convergence_states_finalized = True self._next_chain_term() diff --git a/cubical/machines/jones_chain_robust_machine.py b/cubical/machines/jones_chain_robust_machine.py index e4c7042d..18956d50 100644 --- a/cubical/machines/jones_chain_robust_machine.py +++ b/cubical/machines/jones_chain_robust_machine.py @@ -8,7 +8,7 @@ import cubical.kernels from cubical.tools import logger -import machine_types +from . 
import machine_types from cubical.flagging import FL log = logger.getLogger("jones_chain") #TODO check this @@ -107,7 +107,7 @@ def export_solutions(self): # prefix jones label to solution name for term in self.jones_terms: if term.solvable: - for label, sol in term.export_solutions().iteritems(): + for label, sol in term.export_solutions().items(): soldict["{}:{}".format(term.jones_label, label)] = sol soldict['prefixed'] = True @@ -169,7 +169,7 @@ def compute_js(self, obser_arr, model_arr): self.cached_model_arr = cached_model_arr = np.empty_like(model_arr) np.copyto(cached_model_arr, model_arr) - for ind in xrange(self.n_terms - 1, self.active_index, -1): + for ind in range(self.n_terms - 1, self.active_index, -1): term = self.jones_terms[ind] term.apply_gains(cached_model_arr) @@ -192,7 +192,7 @@ def compute_js(self, obser_arr, model_arr): np.copyto(self.jh, self.cached_model_arr) - for ind in xrange(self.active_index, -1, -1): + for ind in range(self.active_index, -1, -1): term = self.jones_terms[ind] self.cychain.cycompute_jh(self.jh, term.gains, *term.gain_intervals) @@ -207,7 +207,7 @@ def compute_js(self, obser_arr, model_arr): #computing jhwr which jhr * the weights self.cykernel.cycompute_jhwr(self.jh, self.residuals, self.weights, self._jhr, 1, 1) - for ind in xrange(0, self.active_index, 1): + for ind in range(0, self.active_index, 1): term = self.jones_terms[ind] g_inv, gh_inv, flag_counts = term.get_inverse_gains() self.cychain.cyapply_left_inv_jones(self._jhr, g_inv, *term.gain_intervals) @@ -427,17 +427,17 @@ def _next_chain_term(self): if self.active_term.solvable: self.active_term.maxiter = self.term_iters.pop(0) if not self.active_term.maxiter: - print>> log(1), "skipping term {}: 0 term iters specified".format(self.active_term.jones_label) + print("skipping term {}: 0 term iters specified".format(self.active_term.jones_label), file=log(1)) continue self.active_term.iters = 0 self._convergence_states_finalized = False if previous_term: previous_term.has_converged = previous_term.has_stalled = False self.active_term.has_converged = self.active_term.has_stalled = False - print>> log(1), "activating term {}".format(self.active_term.jones_label) + print("activating term {}".format(self.active_term.jones_label), file=log(1)) return True else: - print>> log(1), "skipping term {}: non-solvable".format(self.active_term.jones_label) + print("skipping term {}: non-solvable".format(self.active_term.jones_label), file=log(1)) def next_iteration(self): @@ -451,9 +451,9 @@ def next_iteration(self): major_step = False if self.active_term.has_converged or self.active_term.has_stalled: - print>>log(1),"term {} {} ({} iters): {}".format(self.active_term.jones_label, + print("term {} {} ({} iters): {}".format(self.active_term.jones_label, "converged" if self.active_term.has_converged else "stalled", - self.active_term.iters, self.active_term.final_convergence_status_string) + self.active_term.iters, self.active_term.final_convergence_status_string), file=log(1)) self._convergence_states.append(self.active_term.final_convergence_status_string) self._convergence_states_finalized = True self._next_chain_term() diff --git a/cubical/machines/machine_types.py b/cubical/machines/machine_types.py index 82461b17..be41493e 100644 --- a/cubical/machines/machine_types.py +++ b/cubical/machines/machine_types.py @@ -1,7 +1,7 @@ -import complex_2x2_machine -import complex_W_2x2_machine -import phase_diag_machine -import slope_machine +from . import complex_2x2_machine +from . 
import complex_W_2x2_machine +from . import phase_diag_machine +from . import slope_machine # this provides a map from string "Jones type" identifiers to specific GainMachine classes diff --git a/cubical/machines/parallactic_machine.py b/cubical/machines/parallactic_machine.py index 22b1e21b..05099ebc 100644 --- a/cubical/machines/parallactic_machine.py +++ b/cubical/machines/parallactic_machine.py @@ -1,4 +1,4 @@ -from __future__ import print_function + import pyrap.quanta as pq import pyrap.measures @@ -193,7 +193,7 @@ def unpad(vis, padded_vis): nrow = 100000 nchunk = int(np.ceil(vis.shape[0] / float(nrow))) - for c in xrange(nchunk): + for c in range(nchunk): lb, ub = c * nrow, min((c + 1) * nrow, vis.shape[0]) if ub - lb == 0: break diff --git a/cubical/machines/phase_diag_machine.py b/cubical/machines/phase_diag_machine.py index e73b62e5..86da9dbf 100644 --- a/cubical/machines/phase_diag_machine.py +++ b/cubical/machines/phase_diag_machine.py @@ -2,7 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details -from __future__ import print_function + from cubical.machines.interval_gain_machine import PerIntervalGains import numpy as np from cubical.flagging import FL diff --git a/cubical/machines/slope_machine.py b/cubical/machines/slope_machine.py index 1e8a7f34..f1e4205b 100644 --- a/cubical/machines/slope_machine.py +++ b/cubical/machines/slope_machine.py @@ -19,13 +19,13 @@ def _normalize(x, dtype): return x -import __builtin__ +import builtins try: - __builtin__.profile + builtins.profile except AttributeError: # No line profiler, provide a pass-through version def profile(func): return func - __builtin__.profile = profile + builtins.profile = profile class PhaseSlopeGains(ParameterisedGains): @@ -94,7 +94,7 @@ def determine_diagonality(cls, options): @classmethod def get_full_kernel(cls, options, diag_gains): - from phase_diag_machine import PhaseDiagGains + from .phase_diag_machine import PhaseDiagGains return PhaseDiagGains.get_full_kernel(options, diag_gains) @staticmethod @@ -124,14 +124,14 @@ def importable_solutions(self): # defines solutions we can import from # Note that complex gain (as a derived parameter) is exported, but not imported - return { label: self.interval_grid for label in self._labels.iterkeys() } + return { label: self.interval_grid for label in self._labels.keys() } def export_solutions(self): """ Saves the solutions to a dict of {label: solutions,grids} items. """ solutions = ParameterisedGains.export_solutions(self) - for label, num in self._labels.iteritems(): + for label, num in self._labels.items(): solutions[label] = masked_array(self.slope_params[...,num,(0,1),(0,1)]), self.interval_grid if self.posterior_slope_error is not None: solutions[label+".err"] = masked_array(self.posterior_slope_error[..., num, :]), self.interval_grid @@ -152,7 +152,7 @@ def import_solutions(self, soldict): # delay will then be left at zero). 
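A note on the dict-method conversions in these files: Python 3 drops iteritems()/iterkeys()/itervalues(), and items()/keys()/values() now return live views, so plain read-only loops need no list() wrapper, but a snapshot via list() becomes necessary when the dict is mutated during iteration (as in the deletion loops converted elsewhere in this patch). A small sketch of both cases (hypothetical dict, not CubiCal code):

    labels = {"delay": 0, "rate": 1}

    for label, num in labels.items():   # read-only iteration: a view is fine
        pass

    for key in list(labels.keys()):     # snapshot needed: we delete entries
        if labels[key] > 0:
            del labels[key]             # mutating a live view raises RuntimeError
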
loaded = False - for label, num in self._labels.iteritems(): + for label, num in self._labels.items(): value = soldict.get(label) if value is not None: self.slope_params[...,num,(0,1),(0,1)] = value diff --git a/cubical/madmax/flagger.py b/cubical/madmax/flagger.py index be0f2750..d7f3e9cd 100644 --- a/cubical/madmax/flagger.py +++ b/cubical/madmax/flagger.py @@ -13,13 +13,13 @@ # Conversion factor for sigma = SIGMA_MAD*mad SIGMA_MAD = 1.4826 -import __builtin__ +import builtins try: - __builtin__.profile + builtins.profile except AttributeError: # No line profiler, provide a pass-through version def profile(func): return func - __builtin__.profile = profile + builtins.profile = profile class Flagger(object): @@ -122,15 +122,14 @@ def report_carnage(self, absres, mad, baddies, flags_arr, method, max_label): warning, color = "WARNING: ", "red" frac = nbad / float(baddies.size) mode = "trial-" if self._trial else ("pretend-" if self._pretend else "") - print>> log(1, color), \ - "{warning}{max_label} {method} {mode}flags {nbad} ({frac:.2%}) visibilities".format(**locals()) + print("{warning}{max_label} {method} {mode}flags {nbad} ({frac:.2%}) visibilities".format(**locals()), file=log(1, color)) if log.verbosity() > 2 or self.GD['madmax']['plot']: per_bl = [] total_elements = float(n_tim * n_fre) interesting_fraction = self.GD['madmax']['plot-frac-above']*total_elements plot_explicit_baselines = [] - for p in xrange(n_ant): - for q in xrange(p + 1, n_ant): + for p in range(n_ant): + for q in range(p + 1, n_ant): n_flagged = baddies[:, :, p, q].sum() if n_flagged and n_flagged >= interesting_fraction: per_bl.append((n_flagged, p, q)) @@ -141,7 +140,7 @@ def report_carnage(self, absres, mad, baddies, flags_arr, method, max_label): per_bl_str = ["{} ({}m): {} ({:.2%})".format(self.metadata.baseline_name[p,q], int(self.metadata.baseline_length[p,q]), n_flagged, n_flagged/total_elements) for n_flagged, p, q in per_bl] - print>> log(3), "{} of which per baseline: {}".format(max_label, ", ".join(per_bl_str)) + print("{} of which per baseline: {}".format(max_label, ", ".join(per_bl_str)), file=log(3)) # plot, if asked to if self.GD['madmax']['plot']: baselines_to_plot = [] @@ -172,18 +171,18 @@ def report_carnage(self, absres, mad, baddies, flags_arr, method, max_label): else: filename = self.get_plot_filename() figure.savefig(filename, dpi=300) - print>>log(1),"{}: saving Mad Max flagging plot to {}".format(self.chunk_label,filename) + print("{}: saving Mad Max flagging plot to {}".format(self.chunk_label,filename), file=log(1)) pylab.close(figure) del figure made_plots = True except Exception as exc: traceback.print_exc() - print>>log(1, "red"), "WARNING: {}: exception {} raised while generating Mad Max waterfall plot for baseline {} ({})".format( - self.chunk_label, exc, blname, baseline_label) - print>>log(1), "Although harmless, this may indicate a problem with the data, or a bug in CubiCal." - print>>log(1), "Please see stack trace above, and report if you think this is a bug." 
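A note on the report_carnage() message rewritten above: it uses str.format(**locals()) to interpolate local variables by name rather than by position. A reduced sketch of the idiom (report() is a hypothetical stand-in, loosely modelled on report_carnage()):

    def report(nbad, size):
        frac = nbad / float(size)
        warning = "WARNING: " if frac > 0.1 else ""
        mode = "trial-"
        # **locals() exposes nbad, frac, warning, mode to format() by name
        return "{warning}{mode}flags {nbad} ({frac:.2%}) visibilities".format(**locals())

    assert report(3, 10) == "WARNING: trial-flags 3 (30.00%) visibilities"
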
+ print("WARNING: {}: exception {} raised while generating Mad Max waterfall plot for baseline {} ({})".format( + self.chunk_label, exc, blname, baseline_label), file=log(1, "red")) + print("Although harmless, this may indicate a problem with the data, or a bug in CubiCal.", file=log(1)) + print("Please see stack trace above, and report if you think this is a bug.", file=log(1)) else: - print>> log(2),"{} {} abides".format(max_label, method) + print("{} {} abides".format(max_label, method), file=log(2)) return made_plots, nbad>0 @@ -221,24 +220,24 @@ def beyond_thunderdome(self, resid_arr, data_arr, model_arr, flags_arr, threshol shape1 = [mad.shape[0], mad.shape[1]*mad.shape[2]] + list(mad.shape[3:]) medmad = np.ma.median(mad.reshape(shape1), axis=1) # all this was worth it, just so I could type "mad.max()" as legit code - print>>log(2),"{} per-baseline MAD min {:.3g}, max {:.3g}, median {:.3g} to {:.3g}".format(max_label, mad.min(), mad.max(), medmad.min(), medmad.max()) + print("{} per-baseline MAD min {:.3g}, max {:.3g}, median {:.3g} to {:.3g}".format(max_label, mad.min(), mad.max(), medmad.min(), medmad.max()), file=log(2)) if log.verbosity() > 4: - for imod in xrange(n_mod): + for imod in range(n_mod): if self.mad_per_corr: for ic1,c1 in enumerate(self.metadata.feeds): for ic2,c2 in enumerate(self.metadata.feeds): - per_bl = [(mad[imod,p,q,ic1,ic2], p, q) for p in xrange(n_ant) - for q in xrange(p+1, n_ant) if not mad.mask[imod,p,q,ic1,ic2]] + per_bl = [(mad[imod,p,q,ic1,ic2], p, q) for p in range(n_ant) + for q in range(p+1, n_ant) if not mad.mask[imod,p,q,ic1,ic2]] per_bl = ["{} ({}m): {:.3g}".format(self.metadata.baseline_name[p,q], int(self.metadata.baseline_length[p,q]), x) for x, p, q in sorted(per_bl)[::-1]] - print>>log(4),"{} model {} {}{} MADs are {}".format(max_label, imod, - c1.upper(), c2.upper(), ", ".join(per_bl)) + print("{} model {} {}{} MADs are {}".format(max_label, imod, + c1.upper(), c2.upper(), ", ".join(per_bl)), file=log(4)) else: - per_bl = [(mad[imod,p,q,], p, q) for p in xrange(n_ant) - for q in xrange(p+1, n_ant) if not mad.mask[imod,p,q]] + per_bl = [(mad[imod,p,q,], p, q) for p in range(n_ant) + for q in range(p+1, n_ant) if not mad.mask[imod,p,q]] per_bl = ["{} ({}m) {:.3g}".format(self.metadata.baseline_name[p,q], int(self.metadata.baseline_length[p,q]), x) for x, p, q in sorted(per_bl)[::-1]] - print>>log(4),"{} model {} MADs are {}".format(max_label, imod, ", ".join(per_bl)) + print("{} model {} MADs are {}".format(max_label, imod, ", ".join(per_bl)), file=log(4)) made_plots = flagged_something = False @@ -298,34 +297,34 @@ def beyond_thunderdome(self, resid_arr, data_arr, model_arr, flags_arr, threshol if self.mad_per_corr: outflags = outflags.any(axis=(-1,-2)) if self.GD['madmax']['flag-ant'] and not self._pretend: - print>>log(0, "red"),"{} baselines {}flagged on mad residuals (--madmax-flag-ant 1)".format( - outflags.sum()/2, "trial-" if self._trial else "") + print("{} baselines {}flagged on mad residuals (--madmax-flag-ant 1)".format( + outflags.sum()/2, "trial-" if self._trial else ""), file=log(0, "red")) flags_arr[:,:,outflags] |= self.flagbit if model_arr is not None: model_arr[:,:,:,:,outflags,:,:] = 0 if data_arr is not None: data_arr[:,:,:,outflags,:,:] = 0 else: - print>>log(0, "red"),"{} baselines would have been flagged due to mad residuals (use --madmax-flag-ant to enable this)".format(outflags.sum()/2) + print("{} baselines would have been flagged due to mad residuals (use --madmax-flag-ant to enable 
this)".format(outflags.sum()/2), file=log(0, "red")) try: if self.GD['madmax']['plot'] == 'show': pylab.show() else: filename = self.get_plot_filename('mads') - print>>log(1),"{}: saving MAD distribution plot to {}".format(self.chunk_label,filename) + print("{}: saving MAD distribution plot to {}".format(self.chunk_label,filename), file=log(1)) figure.savefig(filename, dpi=300) - import cPickle + import pickle pickle_file = filename+".cp" - cPickle.dump((mad, medmad, med_thr, self.metadata, max_label), open(pickle_file, "w"), 2) - print>>log(1),"{}: pickling MAD distribution to {}".format(self.chunk_label, pickle_file) + pickle.dump((mad, medmad, med_thr, self.metadata, max_label), open(pickle_file, "w"), 2) + print("{}: pickling MAD distribution to {}".format(self.chunk_label, pickle_file), file=log(1)) pylab.close(figure) del figure except Exception as exc: traceback.print_exc() - print>> log(1,"red"), "WARNING: {}: exception {} raised while rendering Mad Max summary plot".format( - self.chunk_label, exc) - print>> log(1), "Although harmless, this may indicate a problem with the data, or a bug in CubiCal." - print>> log(1), "Please see stack trace above, and report if you think this is a bug." + print("WARNING: {}: exception {} raised while rendering Mad Max summary plot".format( + self.chunk_label, exc), file=log(1,"red")) + print("Although harmless, this may indicate a problem with the data, or a bug in CubiCal.", file=log(1)) + print("Please see stack trace above, and report if you think this is a bug.", file=log(1)) return flagged_something and not self._pretend \ No newline at end of file diff --git a/cubical/madmax/plots.py b/cubical/madmax/plots.py index 96d66087..e7e47f15 100644 --- a/cubical/madmax/plots.py +++ b/cubical/madmax/plots.py @@ -58,14 +58,14 @@ def make_baseline_mad_plot(mad, medmad, med_thr, metadata, max_label="", chunk_l outflags = np.zeros(mad.shape[1:],bool) # sort baselines by length and form up index list - baselines = [ (p,q) for p in xrange(n_ant) for q in xrange(p+1, n_ant) ] + baselines = [ (p,q) for p in range(n_ant) for q in range(p+1, n_ant) ] # sort arrays by baseline length indices_pq = sorted([(metadata.baseline_length[p, q], p, q) for p, q in baselines]) from cubical.madmax.flagger import SIGMA_MAD def make_antenna_mads(mad_threshold): - print>>log(3),"make_baseline_mad_plot: plotting antennas" + print("make_baseline_mad_plot: plotting antennas", file=log(3)) # compute per-antenna MAD if per_corr: ## again, wanted to do this @@ -76,7 +76,7 @@ def make_antenna_mads(mad_threshold): medant = np.ma.median(mad[0,...].reshape(shape1), axis=1) else: medant = np.ma.median(mad[0,...], axis=1) - antnum = np.ma.masked_array(xrange(n_ant), medant.mask) + antnum = np.ma.masked_array(range(n_ant), medant.mask) if not medant.mask.all(): medmed = np.ma.median(medant) madmed = np.ma.median(abs(medant-medmed)) @@ -90,7 +90,7 @@ def make_antenna_mads(mad_threshold): thresholds.append((mad_threshold, "red")) for thr,color in thresholds: pylab.axhline(medmed+thr*SIGMA_MAD*madmed, color=color, ls=':') - for p in xrange(n_ant): + for p in range(n_ant): if antnum.mask is np.ma.nomask or not antnum.mask[p]: pylab.axvline(antnum[p], color="0.9") color = "black" @@ -103,19 +103,19 @@ def make_antenna_mads(mad_threshold): pylab.ylabel("MAD residual over baselines") except Exception as exc: traceback.print_exc() - print>> log(1,"red"), "WARNING: {}: exception {} raised while generating Mad Max antenna-MAD plot".format( - chunk_label, exc) - print>> log(1), "Although harmless, 
this may indicate a problem with the data, or a bug in CubiCal." - print>> log(1), "Please see stack trace above, and report if you think this is a bug." + print("WARNING: {}: exception {} raised while generating Mad Max antenna-MAD plot".format( + chunk_label, exc), file=log(1,"red")) + print("Although harmless, this may indicate a problem with the data, or a bug in CubiCal.", file=log(1)) + print("Please see stack trace above, and report if you think this is a bug.", file=log(1)) if mad_threshold: antmask = medant > medmed + mad_threshold*SIGMA_MAD*madmed if antmask.any(): - print>>log(0,"red"), "{}: antennas {} have mad residuals, refer to Mad Max plots".format(max_label, - ",".join([metadata.antenna_name[p] for p,fl in enumerate(antmask) if fl])) + print("{}: antennas {} have mad residuals, refer to Mad Max plots".format(max_label, + ",".join([metadata.antenna_name[p] for p,fl in enumerate(antmask) if fl])), file=log(0,"red")) else: - print>>log(1),"{}: no antennas with mad residuals".format(max_label) + print("{}: no antennas with mad residuals".format(max_label), file=log(1)) outflags[antmask,:] = True outflags[:,antmask] = True if per_corr: @@ -130,7 +130,7 @@ def make_antenna_mads(mad_threshold): def make_baseline_mads(): # remake indices, since flagged (masked) baselines may have changed if per_corr: - indices = [(bl,p,q,c1,c2) for bl,p,q in indices_pq for c1 in xrange(n_cor) for c2 in xrange(n_cor)] + indices = [(bl,p,q,c1,c2) for bl,p,q in indices_pq for c1 in range(n_cor) for c2 in range(n_cor)] mask = [mad.mask[0,p,q,c1,c2] for _,p,q,c1,c2 in indices] blmad = np.ma.masked_array([(mad[0,p,q,c1,c2] or 0) for _,p,q,c1,c2 in indices],mask) else: @@ -144,7 +144,7 @@ def make_baseline_mads(): # for every baseline, compute local MMAD from cubical.madmax.flagger import SIGMA_MAD - print>>log(3),"make_baseline_mad_plot: computing LMMAD" + print("make_baseline_mad_plot: computing LMMAD", file=log(3)) lmmad = {} for i,(_,p,q,_,_) in enumerate(indices): if (p,q) not in lmmad: @@ -163,7 +163,7 @@ def make_baseline_mads(): lmmad_ad = np.ma.masked_array([abs((blmad1 or 0) - lmmad.get((p,q), 0)) for (_,p,q,_,_),blmad1 in zip(indices,blmad)], blmad.mask) lmmad_madmad = np.ma.median(lmmad_ad) - print>>log(3),"make_baseline_mad_plot: plotting baselines" + print("make_baseline_mad_plot: plotting baselines", file=log(3)) xlim = [0, 0] @@ -191,8 +191,8 @@ def make_baseline_mads(): pylab.text(0, med_thr[0], "threshold", color="black", ha='right', va='center', size='x-small') - for p in xrange(n_ant): - for q in xrange(p + 1, n_ant): + for p in range(n_ant): + for q in range(p + 1, n_ant): if not mad.mask[0, p, q].all(): uvdist = metadata.baseline_length[p, q] xlim[1] = max(xlim[1], uvdist) @@ -231,10 +231,10 @@ def make_baseline_mads(): pylab.title("{}: MAD residuals".format(max_label)) except Exception as exc: traceback.print_exc() - print>> log(1, "red"), "WARNING: {}: exception {} raised while generating Mad Max baseline-MAD plot".format( - chunk_label, exc) - print>> log(1), "Although harmless, this may indicate a problem with the data, or a bug in CubiCal." - print>> log(1), "Please see stack trace above, and report if you think this is a bug." 
+            print("WARNING: {}: exception {} raised while generating Mad Max baseline-MAD plot".format(
+                chunk_label, exc), file=log(1, "red"))
+            print("Although harmless, this may indicate a problem with the data, or a bug in CubiCal.", file=log(1))
+            print("Please see stack trace above, and report if you think this is a bug.", file=log(1))
 
     figure = pylab.figure(figsize=(16, 10))
 
@@ -250,7 +250,7 @@ def make_baseline_mads():
     pylab.subplot(2,2,3)
     make_antenna_mads(antenna_mad_threshold)
 
-    print>>log(3),"make_baseline_mad_plot: done"
+    print("make_baseline_mad_plot: done", file=log(3))
 
     return outflags, figure
 
diff --git a/cubical/main.py b/cubical/main.py
index 0a0f9879..08bb7f4c 100644
--- a/cubical/main.py
+++ b/cubical/main.py
@@ -14,7 +14,7 @@
 #  logging.getLogger('vext').setLevel(logging.WARNING)
 ##
 
-import cPickle
+import pickle
 import os, os.path
 import sys
 import warnings
@@ -71,18 +71,18 @@ def expand_templated_name(name, **keys):
         keys.update(_runtime_templates)
         keys.update(GD)
         # substitute recursively, but up to a limit
-        for i in xrange(10):
+        for i in range(10):
             name1 = name.format(**keys)
             if name1 == name:
                 break
             name = name1
         return name
-    except Exception, exc:
-        print>> log, "{}({})\n {}".format(type(exc).__name__, exc, traceback.format_exc())
+    except Exception as exc:
+        print("{}({})\n {}".format(type(exc).__name__, exc, traceback.format_exc()), file=log)
        if name == name0:
-            print>> log, ModColor.Str("Error substituting '{}', see above".format(name))
+            print(ModColor.Str("Error substituting '{}', see above".format(name)), file=log)
        else:
-            print>> log, ModColor.Str("Error substituting '{}' (derived from '{}'), see above".format(name, name0))
+            print(ModColor.Str("Error substituting '{}' (derived from '{}'), see above".format(name, name0)), file=log)
         raise ValueError(name)
 
 from cubical.data_handler.ms_data_handler import MSDataHandler
@@ -139,8 +139,8 @@ def main(debugging=False):
 
     try:
         if debugging:
-            print>> log, "initializing from cubical.last"
-            GD = cPickle.load(open("cubical.last"))
+            print("initializing from cubical.last", file=log)
+            GD = pickle.load(open("cubical.last", "rb"))
             basename = GD["out"]["name"]
             parser = None
         else:
@@ -150,7 +150,7 @@ def main(debugging=False):
 
             if len(sys.argv) > 1 and not sys.argv[1][0].startswith('-'):
                 custom_parset_file = sys.argv[1]
-                print>> log, "reading defaults from {}".format(custom_parset_file)
+                print("reading defaults from {}".format(custom_parset_file), file=log)
                 try:
                     parset = parsets.Parset(custom_parset_file)
                 except:
@@ -196,7 +196,7 @@ def main(debugging=False):
 
         # find unique output name, if needed
         if os.path.exists("{}/{}.log".format(dirname, basename)) and not GD["out"]["overwrite"]:
-            print>> log(0, "blue"), "{}/{}.log already exists, won't overwrite".format(dirname, basename)
+            print("{}/{}.log already exists, won't overwrite".format(dirname, basename), file=log(0, "blue"))
             dirname0, basename0 = dirname, basename
             N = -1
             while os.path.exists("{}/{}.log".format(dirname, basename)):
@@ -208,18 +208,18 @@ def main(debugging=False):
             # rename old directory, if we ended up manipulating the directory name
             if dirname != dirname0:
                 os.rename(dirname0, dirname)
-                print>> log(0, "blue"), "saved previous {} to {}".format(dirname0, dirname)
+                print("saved previous {} to {}".format(dirname0, dirname), file=log(0, "blue"))
                 dirname = dirname0
             os.mkdir(dirname)
 
         if dirname != ".":
             basename = "{}/{}".format(dirname, basename)
-        print>> log(0, "blue"), "using {} as base for output files".format(basename)
+        print("using {} as base for output 
files".format(basename), file=log(0, "blue"))
 
         GD["out"]["name"] = basename
 
     # "GD" is a global defaults dict, containing options set up from parset + command line
-    cPickle.dump(GD, open("cubical.last", "w"))
+    pickle.dump(GD, open("cubical.last", "wb"))
 
     # save parset with all settings. We refuse to clobber a parset with itself
     # (so e.g. "gocubical test.parset --Section-Option foo" does not overwrite test.parset)
@@ -228,7 +228,7 @@ def main(debugging=False):
                         os.path.samefile(save_parset, custom_parset_file):
             basename = "~" + basename
             save_parset = basename + ".parset"
-            print>> log, ModColor.Str("your --out-name would overwrite its own parset. Using {} instead.".format(basename))
+            print(ModColor.Str("your --out-name would overwrite its own parset. Using {} instead.".format(basename)), file=log)
         parser.write_to_parset(save_parset)
 
     enable_pdb = GD["debug"]["pdb"]
@@ -243,7 +243,7 @@ def main(debugging=False):
         logger.setGlobalLogVerbosity(GD["log"]["file-verbose"])
 
     if not debugging:
-        print>>log, "started " + " ".join(sys.argv)
+        print("started " + " ".join(sys.argv), file=log)
 
     # disable matplotlib's tk backend if we're not going to be showing plots
     if GD['out']['plots'] =='show' or GD['madmax']['plot'] == 'show':
@@ -251,10 +251,10 @@ def main(debugging=False):
         try:
             pylab.figure()
             pylab.close()
-        except Exception, exc:
+        except Exception as exc:
             import traceback
-            print>>log, ModColor.Str("Error initializing matplotlib: {}({})\n {}".format(type(exc).__name__,
-                                                                                exc, traceback.format_exc()))
+            print(ModColor.Str("Error initializing matplotlib: {}({})\n {}".format(type(exc).__name__,
+                                                                                exc, traceback.format_exc())), file=log)
             raise UserInputError("matplotlib can't connect to X11. Can't use --out-plots show or --madmax-plot show.")
     else:
         matplotlib.use("Agg")
@@ -276,7 +276,7 @@ def main(debugging=False):
     # collect list of options from enabled Jones matrices
     if not len(jones_opts):
         raise UserInputError("No Jones terms are enabled")
-    print>> log, ModColor.Str("Enabling {}-Jones".format(",".join(sol_jones)), col="green")
+    print(ModColor.Str("Enabling {}-Jones".format(",".join(sol_jones)), col="green"), file=log)
 
     have_dd_jones = any([jo['dd-term'] for jo in jones_opts])
 
@@ -288,12 +288,12 @@ def main(debugging=False):
     if solver_type not in solver.SOLVERS:
         raise UserInputError("invalid setting --out-mode {}".format(solver_type))
     solver_mode_name = solver.SOLVERS[solver_type].__name__.replace("_", " ")
-    print>>log,ModColor.Str("mode: {}".format(solver_mode_name), col='green')
+    print(ModColor.Str("mode: {}".format(solver_mode_name), col='green'), file=log)
     # these flags are used below to tweak the behaviour of gain machines and model loaders
     apply_only = solver.SOLVERS[solver_type].is_apply_only
-    print>>log(0),"solver is apply-only type: {}".format(apply_only)
+    print("solver is apply-only type: {}".format(apply_only), file=log(0))
     load_model = solver.SOLVERS[solver_type].is_model_required
-    print>>log(0),"solver requires model: {}".format(load_model)
+    print("solver requires model: {}".format(load_model), file=log(0))
 
     if load_model and not GD["model"]["list"]:
         raise UserInputError("--model-list must be specified")
@@ -385,7 +385,7 @@ def main(debugging=False):
         if type(subdirs) is str:
             try:
                 if ',' in subdirs:
-                    subdirs = map(int, subdirs.split(","))
+                    subdirs = list(map(int, subdirs.split(",")))
                 else:
                     subdirs = eval("np.s_[{}]".format(subdirs))
             except:
@@ -398,7 +398,7 @@ def main(debugging=False):
             if out_of_range:
                 raise UserInputError("--out-subtract-dirs {} out of range for {} model 
direction(s)".format( ",".join(map(str, out_of_range)), len(ms.model_directions))) - print>>log(0),"subtraction directions set to {}".format(subdirs) + print("subtraction directions set to {}".format(subdirs), file=log(0)) else: subdirs = slice(None) solver_opts["subtract-dirs"] = subdirs @@ -439,8 +439,8 @@ def main(debugging=False): if GD["dist"]["max-chunks"]: chunks_per_tile = max(GD["dist"]["max-chunks"], chunks_per_tile) - print>>log, "defining chunks (time {}, freq {}{})".format(GD["data"]["time-chunk"], GD["data"]["freq-chunk"], - ", also when {} jumps > {}".format(", ".join(chunk_by), jump) if chunk_by else "") + print("defining chunks (time {}, freq {}{})".format(GD["data"]["time-chunk"], GD["data"]["freq-chunk"], + ", also when {} jumps > {}".format(", ".join(chunk_by), jump) if chunk_by else ""), file=log) chunks_per_tile, tile_list = ms.define_chunk(GD["data"]["time-chunk"], GD["data"]["rebin-time"], GD["data"]["freq-chunk"], @@ -454,7 +454,7 @@ def main(debugging=False): # single-chunk implies single-tile if single_tile >= 0: tile_list = tile_list[single_tile:single_tile+1] - print>> log(0, "blue"), "--data-single-tile {} set, will process only the one tile".format(single_tile) + print("--data-single-tile {} set, will process only the one tile".format(single_tile), file=log(0, "blue")) elif single_chunk: match = re.match("D([0-9]+)T([0-9]+)", single_chunk) if not match: @@ -466,8 +466,8 @@ def main(debugging=False): if single_tile_rc: tile, rc = single_tile_rc tile_list = [tile] - print>> log(0, "blue"), "--data-single-chunk {} in {}, rows {}:{}".format( - single_chunk, tile.label, min(rc.rows0), max(rc.rows0)+1) + print("--data-single-chunk {} in {}, rows {}:{}".format( + single_chunk, tile.label, min(rc.rows0), max(rc.rows0)+1), file=log(0, "blue")) else: raise ValueError("--data-single-chunk {}: chunk with this ID not found".format(single_chunk)) @@ -478,26 +478,26 @@ def main(debugging=False): stats_dict = workers.run_process_loop(ms, tile_list, load_model, single_chunk, solver_type, solver_opts, debug_opts) - print>>log, ModColor.Str("Time taken for {}: {} seconds".format(solver_mode_name, time() - t0), col="green") + print(ModColor.Str("Time taken for {}: {} seconds".format(solver_mode_name, time() - t0), col="green"), file=log) # print flagging stats - print>>log, ModColor.Str("Flagging stats: ",col="green") + " ".join(ms.get_flag_counts()) + print(ModColor.Str("Flagging stats: ",col="green") + " ".join(ms.get_flag_counts()), file=log) if not apply_only: # now summarize the stats - print>> log, "computing summary statistics" + print("computing summary statistics", file=log) st = SolverStats(stats_dict) filename = basename + ".stats.pickle" st.save(filename) - print>> log, "saved summary statistics to %s" % filename + print("saved summary statistics to %s" % filename, file=log) print_stats = GD["log"]["stats"] if print_stats: - print>> log(0), "printing some summary statistics below" + print("printing some summary statistics below", file=log(0)) thresholds = [] for thr in GD["log"]["stats-warn"].split(","): field, value = thr.split(":") thresholds.append((field, float(value))) - print>>log(0), " highlighting {}>{}".format(field, float(value)) + print(" highlighting {}>{}".format(field, float(value)), file=log(0)) if print_stats == "all": print_stats = st.get_notrivial_chunk_statfields() else: @@ -506,7 +506,7 @@ def main(debugging=False): if stats[0] != "{": stats = "{{{}}}".format(stats) lines = st.format_chunk_stats(stats, threshold=thresholds) - print>>log(0)," 
summary stats for {}:\n  {}".format(stats, "\n  ".join(lines))
+        print("  summary stats for {}:\n  {}".format(stats, "\n  ".join(lines)), file=log(0))
 
         if GD["postmortem"]["enable"]:
             # flag based on summary stats
@@ -515,7 +515,7 @@ def main(debugging=False):
             if flag3 is not None:
                 st.apply_flagcube(flag3)
                 if GD["flags"]["save"] and flag3.any() and not GD["data"]["single-chunk"]:
-                    print>>log,"regenerating output flags based on post-solution flagging"
+                    print("regenerating output flags based on post-solution flagging", file=log)
                     flagcol = ms.flag3_to_col(flag3)
                     ms.save_flags(flagcol)
 
@@ -524,14 +524,14 @@ def main(debugging=False):
             import cubical.plots
             try:
                 cubical.plots.make_summary_plots(st, ms, GD, basename)
-            except Exception, exc:
+            except Exception as exc:
                 if GD["debug"]["escalate-warnings"]:
                     raise
                 import traceback
-                print>> ModColor.Str("An error has occurred while making summary plots: {}({})\n {}".format(type(exc).__name__,
+                print(ModColor.Str("An error has occurred while making summary plots: {}({})\n {}".format(type(exc).__name__,
                                                                                                             exc,
-                                                                                                            traceback.format_exc()))
-                print>>log, ModColor.Str("This is not fatal, but should be reported (and your plots have gone missing!)")
+                                                                                                            traceback.format_exc())), file=log)
+                print(ModColor.Str("This is not fatal, but should be reported (and your plots have gone missing!)"), file=log)
 
         # make BBC plots
         if solver.ifrgain_machine and solver.ifrgain_machine.is_computing() and GD["bbc"]["plot"] and GD["out"]["plots"]:
@@ -543,24 +543,24 @@ def main(debugging=False):
             else:
                 try:
                     cubical.plots.ifrgains.make_ifrgain_plots(solver.ifrgain_machine.reload(), ms, GD, basename)
-                except Exception, exc:
+                except Exception as exc:
                     import traceback
-                    print>> ModColor.Str("An error has occurred while making BBC plots: {}({})\n {}".format(type(exc).__name__,
+                    print(ModColor.Str("An error has occurred while making BBC plots: {}({})\n {}".format(type(exc).__name__,
                                                                                                             exc,
-                                                                                                            traceback.format_exc()))
-                    print>>log, ModColor.Str("This is not fatal, but should be reported (and your plots have gone missing!)")
+                                                                                                            traceback.format_exc())), file=log)
+                    print(ModColor.Str("This is not fatal, but should be reported (and your plots have gone missing!)"), file=log)
 
     ms.close()
 
-    print>>log, ModColor.Str("completed successfully", col="green")
+    print(ModColor.Str("completed successfully", col="green"), file=log)
 
-    except Exception, exc:
+    except Exception as exc:
         if type(exc) is UserInputError:
-            print>> log, ModColor.Str(exc)
+            print(ModColor.Str(exc), file=log)
         else:
             import traceback
-            print>>log, ModColor.Str("Exiting with exception: {}({})\n {}".format(type(exc).__name__,
-                                                                            exc, traceback.format_exc()))
+            print(ModColor.Str("Exiting with exception: {}({})\n {}".format(type(exc).__name__,
+                                                                            exc, traceback.format_exc())), file=log)
         if enable_pdb and not type(exc) is UserInputError:
             from cubical.tools import pdb
             exc, value, tb = sys.exc_info()
diff --git a/cubical/param_db.py b/cubical/param_db.py
index 9ea99096..0da5bb15 100644
--- a/cubical/param_db.py
+++ b/cubical/param_db.py
@@ -11,7 +11,7 @@
 log = logger.getLogger("param_db")
 
 #from database.pickled_db import PickledDatabase
-from database.casa_db_adaptor import casa_db_adaptor
+from .database.casa_db_adaptor import casa_db_adaptor
 
 def create(filename, metadata={}, backup=True):
     """ Creates a new parameter database. 
@@ -56,7 +56,7 @@ def load(filename):
 
 if __name__ == "__main__":
     log.verbosity(2)
-    print "Creating test DB"
+    print("Creating test DB")
     db = create("test.db")
     db.define_param("G", np.float64,
                     ["ant", "time", "freq", "corr"], interpolation_axes=["time", "freq"])
@@ -69,14 +69,14 @@ def load(filename):
         db.add_chunk("B", arr, grid=dict(freq=np.arange(i0,i1)))
 
     db.close()
-    print "Loading test DB"
+    print("Loading test DB")
     db = load("test.db")
-    print db.names()
+    print(db.names())
     G = db['G']
     B = db['B']
-    print "G", db["G"].axis_labels, db["G"].shape
-    print "B", db["B"].axis_labels, db["B"].shape
-    print "G", G.get_slice(ant=0,corr=0)
-    print "B", G.get_slice(ant=0,corr=0)
-    print "Gint", G.reinterpolate(time=np.arange(0,10,.5),freq=np.arange(0,10,1.5))
+    print("G", db["G"].axis_labels, db["G"].shape)
+    print("B", db["B"].axis_labels, db["B"].shape)
+    print("G", G.get_slice(ant=0,corr=0))
+    print("B", B.get_slice(ant=0,corr=0))
+    print("Gint", G.reinterpolate(time=np.arange(0,10,.5),freq=np.arange(0,10,1.5)))
 
diff --git a/cubical/plots/__init__.py b/cubical/plots/__init__.py
index fb2fce8d..a60afda4 100644
--- a/cubical/plots/__init__.py
+++ b/cubical/plots/__init__.py
@@ -35,5 +35,5 @@ def make_summary_plots(st, ms, GD, basename):
     """
     stats.make_stats_plots(st, GD, basename, ms.metadata)
 
-import ifrgains, stats
+from . import ifrgains, stats
 
diff --git a/cubical/plots/ifrgains.py b/cubical/plots/ifrgains.py
index a95d9e96..211b4536 100644
--- a/cubical/plots/ifrgains.py
+++ b/cubical/plots/ifrgains.py
@@ -64,7 +64,7 @@ def _is_unity(rr, ll):
 
 #def make_ifrgain_plots(filename="$STEFCAL_DIFFGAIN_SAVE", prefix="IG", feed="$IFRGAIN_PLOT_FEED", msname="$MS"):
 def make_ifrgain_plots(ig, ms, GD, basename):
     """Makes a set of ifrgain plots from the specified saved file."""
-    print>>log(0),"generating plots for suggested baseline-based corrections (BBCs)"
+    print("generating plots for suggested baseline-based corrections (BBCs)", file=log(0))
     metadata = ms.metadata
 
     import pylab
@@ -73,13 +73,13 @@ def save_figure(name, width, height):
         pylab.gcf().set_size_inches(min(width, 10000 / DPI), min(height, 10000 / DPI))
         filename = "{}.{}.png".format(basename, name)
         pylab.savefig(filename, dpi=DPI)
-        print>> log, "saved plot " + filename
+        print("saved plot " + filename, file=log)
         if GD["out"]["plots"] == "show":
             pylab.show()
         pylab.figure()
 
     # load baseline info, if MS is available
-    antpos = zip(ms.antnames, ms.antpos)
+    antpos = list(zip(ms.antnames, ms.antpos))
     # make dictionary of IFR name: baseline length
     baseline = { metadata.baseline_name[p,q]: metadata.baseline_length[p,q]
                  for p in range(ms.nants) for q in range(p+1, ms.nants) }
@@ -107,7 +107,7 @@ def plot_baseline(content, baseline, title, feeds):
         for l, (x, xe), (y, ye) in content:
             b = baseline.get(l, None)
             if b is None:
-                print>>log(0, "red"),"baseline '{}' not found in MS ANTENNA table".format(l)
+                print("baseline '{}' not found in MS ANTENNA table".format(l), file=log(0, "red"))
             else:
                 lab += ["%s:%s" % (l, feeds[0]), "%s:%s" % (l, feeds[1])]
                 col += ["blue", "red"]
@@ -200,8 +200,8 @@ def plot_ants(content, title):
 
     # collect a list of valid RR/LL and RL/LR pairs (i.e. 
ones not all unity) valid_igs = [] ifr_pairs = {} - for p in xrange(nant): - for q in xrange(p+1, nant): + for p in range(nant): + for q in range(p+1, nant): ifrname = ms.metadata.baseline_name[p,q] rr = ig[:, p, q, i1, j1] ll = ig[:, p, q, i2, j2] diff --git a/cubical/plots/stats.py b/cubical/plots/stats.py index 473f6ec9..799fd361 100644 --- a/cubical/plots/stats.py +++ b/cubical/plots/stats.py @@ -20,7 +20,7 @@ def save_figure(name, width, height): pylab.gcf().set_size_inches(min(width, 10000 / DPI), min(height, 10000 / DPI)) filename = "{}.{}.png".format(basename, name) pylab.savefig(filename, dpi=DPI) - print>> log, "saved plot " + filename + print("saved plot " + filename, file=log) if GD["out"]["plots"] == "show": pylab.show() pylab.figure() @@ -68,7 +68,7 @@ def save_figure(name, width, height): noise = np.sqrt(st.chanant.dr2) noise[noise == 0] = np.inf nf, nant = noise.shape - for ant in xrange(nant): + for ant in range(nant): pylab.plot(noise[:, ant], 'o-') for x in pylab.xticks()[0]: pylab.axvline(x, c="grey", lw=.5, ls=':', zorder=999) @@ -77,7 +77,7 @@ def save_figure(name, width, height): pylab.ylabel("noise") pylab.subplot(122) make_antenna_xaxis(metadata.antenna_name) - for chan in xrange(nf): + for chan in range(nf): pylab.plot(noise[chan, :], 'o-') pylab.title("Noise (colour: channel)") pylab.ylabel("noise") @@ -88,7 +88,7 @@ def save_figure(name, width, height): chi2 = st.chanant.chi2 chi2[chi2 == 0] = np.inf nf, nant = chi2.shape - for ant in xrange(nant): + for ant in range(nant): pylab.plot(chi2[:, ant], 'o-') for x in pylab.xticks()[0]: pylab.axvline(x, c="grey", lw=.5, ls=':', zorder=999) @@ -97,7 +97,7 @@ def save_figure(name, width, height): pylab.ylabel("$\chi^2$") pylab.subplot(122) make_antenna_xaxis(metadata.antenna_name) - for chan in xrange(nf): + for chan in range(nf): pylab.plot(chi2[chan, :], 'o-') pylab.title("Chi-sq (colour: channel)") pylab.ylabel("$\chi^2$") diff --git a/cubical/solver.py b/cubical/solver.py index 73e67197..e426ba61 100644 --- a/cubical/solver.py +++ b/cubical/solver.py @@ -5,7 +5,7 @@ """ Implements the solver loop. 
""" -from __future__ import print_function + import numpy as np import os, os.path import traceback @@ -20,7 +20,7 @@ #warnings.simplefilter('error', UserWarning) #warnings.simplefilter('error', RuntimeWarning) -from madmax.flagger import Flagger +from .madmax.flagger import Flagger log = logger.getLogger("solver") #log.verbosity(2) @@ -40,13 +40,13 @@ # set to true for old-style (version <= 1.2.1) weight averaging, where 2x2 weights are collapsed into a single number legacy_version12_weights = False -import __builtin__ +import builtins try: - __builtin__.profile + builtins.profile except AttributeError: # No line profiler, provide a pass-through version def profile(func): return func - __builtin__.profile = profile + builtins.profile = profile @profile @@ -104,7 +104,7 @@ def get_flagging_stats(): """Returns a string describing per-flagset statistics""" fstats = [] - for flag, mask in FL.categories().iteritems(): + for flag, mask in FL.categories().items(): n_flag = ((flags_arr & mask) != 0).sum() if n_flag: fstats.append("{}:{}({:.2%})".format(flag, n_flag, n_flag/float(flags_arr.size))) @@ -607,7 +607,7 @@ def finalize(self, corr_vis): if self.stats.chunk.num_sol_flagged: # also for up message with flagging stats fstats = [] - for flagname, mask in FL.categories().iteritems(): + for flagname, mask in FL.categories().items(): if mask != FL.MISSING: n_flag, n_tot = self.gm.num_gain_flags(mask) if n_flag: @@ -855,7 +855,7 @@ def run_solver(solver_type, itile, chunk_key, sol_opts, debug_opts): return solver_machine.stats - except Exception, exc: + except Exception as exc: log.error("Solver for tile {} chunk {} failed with exception: {}".format(itile, label, exc)) log.print(traceback.format_exc()) raise diff --git a/cubical/statistics.py b/cubical/statistics.py index 327adea6..595f2959 100644 --- a/cubical/statistics.py +++ b/cubical/statistics.py @@ -8,7 +8,7 @@ import math import numpy as np -import cPickle +import pickle from cubical.tools import logger from cubical.tools import ModColor @@ -97,7 +97,7 @@ def save(self, filename): Name for pickled file. """ - cPickle.dump( + pickle.dump( (self.chanant, self.timeant, self.timechan, self.chunk), open(filename, 'w'), 2) def load(self, fileobj): @@ -105,7 +105,7 @@ def load(self, fileobj): Loads contents from file object """ - self.chanant, self.timeant, self.timechan, self.chunk = cPickle.load(fileobj) + self.chanant, self.timeant, self.timechan, self.chunk = pickle.load(fileobj) def estimate_noise (self, data, flags, residuals=False): """ @@ -207,14 +207,14 @@ def estimate_noise (self, data, flags, residuals=False): def add_records(recarray, recarray2): """ Adds two record-type arrays together. """ - for field in recarray.dtype.fields.iterkeys(): + for field in recarray.dtype.fields.keys(): recarray[field] += recarray2[field] @staticmethod def normalize_records(recarray): """ Normalizes record-type arrays by dividing each field 'X' by the field 'Xn'. """ - for field in recarray.dtype.fields.iterkeys(): + for field in recarray.dtype.fields.keys(): if field[-1] != 'n': nval = recarray[field+'n'] mask = nval!=0 @@ -234,8 +234,8 @@ def _concatenate(self, stats): # Get lists of unique time and channel indices occurring in the dict. - times = sorted(set([time for time, _ in stats.iterkeys()])) - chans = sorted(set([chan for _, chan in stats.iterkeys()])) + times = sorted(set([time for time, _ in stats.keys()])) + chans = sorted(set([chan for _, chan in stats.keys()])) # Concatenate and add up cumulative stats. 
@@ -337,16 +337,16 @@ def apply_flagcube(self, flag3): n_tim, n_ddid, n_fre = flag3.shape flag3 = flag3.reshape((n_tim, n_ddid*n_fre)) - FIELDS = self.timeant.dtype.fields.keys() + FIELDS = list(self.timeant.dtype.fields.keys()) flagged_times = flag3.all(axis=1) flagged_chans = flag3.all(axis=0) - print>>log,"adjusting statistics based on output flags" - print>>log," {:.2%} of all timeslots are flagged".format( - flagged_times.sum()/float(flagged_times.size)) - print>>log," {:.2%} of all channels are flagged".format( - flagged_chans.sum()/float(flagged_chans.size)) + print("adjusting statistics based on output flags", file=log) + print(" {:.2%} of all timeslots are flagged".format( + flagged_times.sum()/float(flagged_times.size)), file=log) + print(" {:.2%} of all channels are flagged".format( + flagged_chans.sum()/float(flagged_chans.size)), file=log) for field in FIELDS: self.chanant[field][flagged_chans, :] = 0 diff --git a/cubical/tools/ClassPrint.py b/cubical/tools/ClassPrint.py index 00fa1952..9d5abce7 100644 --- a/cubical/tools/ClassPrint.py +++ b/cubical/tools/ClassPrint.py @@ -8,7 +8,7 @@ import os import sys -import ModColor +from . import ModColor class ClassPrint(): def __init__(self,HW=20,quote='"'): @@ -31,7 +31,7 @@ def Print(self,par,value,value2=None,dest=sys.stdin): valueOut=value else: valueOut="%s%s"%(value.ljust(self.WV0),(""" "%s" """%value2).rjust(self.WV1)) - print>>dest,"%s = %s"%(parout,valueOut) + print("%s = %s"%(parout,valueOut), file=dest) def Print2(self,par,value,helpit,col="white"): WidthTerm=self.getWidth() @@ -45,16 +45,16 @@ def Print2(self,par,value,helpit,col="white"): helpit="Help yourself" Shelp="%s"%helpit if WidthHelp<0: - print self.proto%(Spar,SFill,Sval)+Shelp + print(self.proto%(Spar,SFill,Sval)+Shelp) return Lhelp=len(str(helpit)) - listStrHelp=range(0,Lhelp,WidthHelp) + listStrHelp=list(range(0,Lhelp,WidthHelp)) if listStrHelp[-1]!=Lhelp: listStrHelp.append(Lhelp) - print self.proto%(Spar,SFill,Sval)+Shelp[0:WidthHelp] + print(self.proto%(Spar,SFill,Sval)+Shelp[0:WidthHelp]) for i in range(1,len(listStrHelp)-1): parout="%s: %s"%(" "*(self.LeftW-2),Shelp[listStrHelp[i]:listStrHelp[i+1]]) - print parout + print(parout) diff --git a/cubical/tools/ModColor.py b/cubical/tools/ModColor.py index 7728ea2c..56cdcf56 100644 --- a/cubical/tools/ModColor.py +++ b/cubical/tools/ModColor.py @@ -30,7 +30,7 @@ def Str(strin0,col="red",Bold=True): ss = _color_dict.get(col) if ss is None: - raise ValueError,"unknown color '{}'".format(col) + raise ValueError("unknown color '{}'".format(col)) ss="%s%s%s"%(ss,strin,ENDC) if Bold: ss="%s%s%s"%(bold,ss,nobold) @@ -43,12 +43,12 @@ def Sep(strin=None,D=1): return Str(Separator%(strin)) def Title(strin,Big=False): - print - print - if Big: print Sep(strin,D=0) - print Sep(strin) - if Big: print Sep(strin,D=0) - print + print() + print() + if Big: print(Sep(strin,D=0)) + print(Sep(strin)) + if Big: print(Sep(strin,D=0)) + print() def disable(): HEADER = '' diff --git a/cubical/tools/NpShared.py b/cubical/tools/NpShared.py index f11f72d9..52a89cab 100644 --- a/cubical/tools/NpShared.py +++ b/cubical/tools/NpShared.py @@ -9,8 +9,8 @@ #import sharedarray.SharedArray as SharedArray import SharedArray -import ModColor -import logger +from . import ModColor +from . 
import logger import traceback log = logger.getLogger("NpShared") import os.path @@ -38,7 +38,7 @@ def CreateShared(Name, shape, dtype): try: a = SharedArray.create(Name, shape, dtype=dtype) except OSError: - print>> log, ModColor.Str("File %s exists, deleting" % Name) + print(ModColor.Str("File %s exists, deleting" % Name), file=log) DelArray(Name) a = SharedArray.create(Name, shape, dtype=dtype) return a @@ -63,7 +63,7 @@ def Lock (array): try: SharedArray.mlock(array) except: - print>> log, "Warning: Cannot lock memory. Try updating your kernel security settings." + print("Warning: Cannot lock memory. Try updating your kernel security settings.", file=log) _locking = False def Unlock (array): @@ -72,7 +72,7 @@ def Unlock (array): try: SharedArray.munlock(array) except: - print>> log, "Warning Cannot unlock memory. Try updating your kernel security settings." + print("Warning Cannot unlock memory. Try updating your kernel security settings.", file=log) _locking = False @@ -101,7 +101,7 @@ def GiveArray(Name): # print "Exception for key [%s]:"%Name # print " %s"%(str(e)) # print - print "Error loading",Name + print("Error loading",Name) traceback.print_exc() return None @@ -116,13 +116,13 @@ def Exists(Name): def DicoToShared(Prefix, Dico, DelInput=False): DicoOut = {} - print>>log, ModColor.Str("DicoToShared: start [prefix = %s]" % Prefix) - for key in Dico.keys(): + print(ModColor.Str("DicoToShared: start [prefix = %s]" % Prefix), file=log) + for key in list(Dico.keys()): if not isinstance(Dico[key], np.ndarray): continue # print "%s.%s"%(Prefix,key) ThisKeyPrefix = "%s.%s" % (Prefix, key) - print>>log, ModColor.Str(" %s -> %s" % (key, ThisKeyPrefix)) + print(ModColor.Str(" %s -> %s" % (key, ThisKeyPrefix)), file=log) ar = Dico[key] Shared = ToShared(ThisKeyPrefix, ar) DicoOut[key] = Shared @@ -132,7 +132,7 @@ def DicoToShared(Prefix, Dico, DelInput=False): if DelInput: del(Dico) - print>>log, ModColor.Str("DicoToShared: done") + print(ModColor.Str("DicoToShared: done"), file=log) #print ModColor.Str("DicoToShared: done") return DicoOut @@ -140,7 +140,7 @@ def DicoToShared(Prefix, Dico, DelInput=False): def SharedToDico(Prefix): - print>>log, ModColor.Str("SharedToDico: start [prefix = %s]" % Prefix) + print(ModColor.Str("SharedToDico: start [prefix = %s]" % Prefix), file=log) Lnames = ListNames() keys = [Name for Name in Lnames if Prefix in Name] if len(keys) == 0: @@ -148,13 +148,13 @@ def SharedToDico(Prefix): DicoOut = {} for Sharedkey in keys: key = Sharedkey.split(".")[-1] - print>>log, ModColor.Str(" %s -> %s" % (Sharedkey, key)) + print(ModColor.Str(" %s -> %s" % (Sharedkey, key)), file=log) Shared = GiveArray(Sharedkey) if isinstance(Shared, type(None)): - print>>log, ModColor.Str(" None existing key %s" % (key)) + print(ModColor.Str(" None existing key %s" % (key)), file=log) return None DicoOut[key] = Shared - print>>log, ModColor.Str("SharedToDico: done") + print(ModColor.Str("SharedToDico: done"), file=log) return DicoOut @@ -166,32 +166,32 @@ def PackListArray(Name, LArray): DelArray(Name) NArray = len(LArray) - ListNDim = [len(LArray[i].shape) for i in xrange(len(LArray))] + ListNDim = [len(LArray[i].shape) for i in range(len(LArray))] NDimTot = np.sum(ListNDim) # [NArray,NDim0...NDimN,shape0...shapeN,Arr0...ArrN] dS = LArray[0].dtype TotSize = 0 - for i in xrange(NArray): + for i in range(NArray): TotSize += LArray[i].size S = SharedArray.create(Name, (1+NArray+NDimTot+TotSize,), dtype=dS) S[0] = NArray idx = 1 # write ndims - for i in xrange(NArray): + for i in 
range(NArray): S[idx] = ListNDim[i] idx += 1 # write shapes - for i in xrange(NArray): + for i in range(NArray): ndim = ListNDim[i] A = LArray[i] S[idx:idx+ndim] = A.shape idx += ndim # write arrays - for i in xrange(NArray): + for i in range(NArray): A = LArray[i] S[idx:idx+A.size] = A.ravel() idx += A.size @@ -205,13 +205,13 @@ def UnPackListArray(Name): # read ndims ListNDim = [] - for i in xrange(NArray): + for i in range(NArray): ListNDim.append(np.int32(S[idx].real)) idx += 1 # read shapes ListShapes = [] - for i in xrange(NArray): + for i in range(NArray): ndim = ListNDim[i] shape = np.int32(S[idx:idx+ndim].real) ListShapes.append(shape) @@ -219,7 +219,7 @@ def UnPackListArray(Name): # read values ListArray = [] - for i in xrange(NArray): + for i in range(NArray): shape = ListShapes[i] size = np.prod(shape) A = S[idx:idx+size].reshape(shape) @@ -236,19 +236,19 @@ def PackListSquareMatrix(shared_dict, Name, LArray): NArray = len(LArray) dtype = LArray[0].dtype TotSize = 0 - for i in xrange(NArray): + for i in range(NArray): TotSize += LArray[i].size # [N,shape0...shapeN,Arr0...ArrN] S = shared_dict.addSharedArray(Name, (TotSize+NArray+1,), dtype=dtype) S[0] = NArray idx = 1 - for i in xrange(NArray): + for i in range(NArray): A = LArray[i] S[idx] = A.shape[0] idx += 1 - for i in xrange(NArray): + for i in range(NArray): A = LArray[i] S[idx:idx+A.size] = A.ravel() idx += A.size @@ -262,13 +262,13 @@ def UnPackListSquareMatrix(Array): idx = 1 ShapeArray = [] - for i in xrange(NArray): + for i in range(NArray): ShapeArray.append(np.int32(S[idx].real)) idx += 1 - print>>log, ShapeArray + print(ShapeArray, file=log) - for i in xrange(NArray): + for i in range(NArray): shape = np.int32(ShapeArray[i].real) size = shape**2 A = S[idx:idx+size].reshape((shape, shape)) diff --git a/cubical/tools/dynoptparse.py b/cubical/tools/dynoptparse.py index 2bec8291..cc82018d 100644 --- a/cubical/tools/dynoptparse.py +++ b/cubical/tools/dynoptparse.py @@ -9,10 +9,10 @@ import sys, re, optparse from collections import OrderedDict -import parsets -import ModColor -import ClassPrint -import logger +from . import parsets +from . import ModColor +from . import ClassPrint +from . 
import logger log = logger.getLogger("dynoptparse") @@ -51,7 +51,7 @@ def __init__(self,usage='Usage: %prog ', version='%prog version 1.0', def _make_parser(self, parser_class=optparse.OptionParser): parser = parser_class(**self._parser_kws) - for label, (title, option_list) in self._groups.iteritems(): + for label, (title, option_list) in self._groups.items(): # create group, unless label is None group = optparse.OptionGroup(parser, title) if label is not None else None # populate group, or else top level @@ -125,7 +125,7 @@ def read_input(self): parser = self._make_parser() self._options, self._arguments = parser.parse_args() # propagate results back into defaults dict - for key, value in vars(self._options).iteritems(): + for key, value in vars(self._options).items(): group, name = self._parse_dest_key(key) group_dict = self._defaults[group] attrs = self._attributes.get(group, {}).get(name, {}) @@ -146,9 +146,9 @@ def get_config(self): def write_to_parset(self, parset_filename): with open(parset_filename, "w") as f: - for group, group_dict in self._defaults.iteritems(): + for group, group_dict in self._defaults.items(): f.write('[{}]\n'.format(group)) - for name, value in group_dict.iteritems(): + for name, value in group_dict.items(): attrs = self._attributes.get(group, {}).get(name, {}) if not attrs.get('cmdline_only') and not attrs.get('alias_of'): f.write('{} = {}\n'.format(name, value)) @@ -156,26 +156,26 @@ def write_to_parset(self, parset_filename): def print_config(self, skip_groups=[], dest=sys.stdout): P = ClassPrint.ClassPrint(HW=50) - print>>dest, ModColor.Str(" Selected Options:") - for group, group_dict in self._defaults.iteritems(): + print(ModColor.Str(" Selected Options:"), file=dest) + for group, group_dict in self._defaults.items(): if group in skip_groups or '_NameTemplate' in group_dict: continue title = self._groups.get(group, (group, None))[0] - print>>dest, ModColor.Str("[{}] {}".format(group, title), col="green") + print(ModColor.Str("[{}] {}".format(group, title), col="green"), file=dest) - for name, value in group_dict.iteritems(): + for name, value in group_dict.items(): if name[0] != "_": # skip "internal" values such as "_Help" attrs = self._attributes.get(group).get(name, {}) if not attrs.get('alias_of') and not attrs.get('cmdline_only') and not attrs.get('no_print'): # and V!="": P.Print(name, value, dest=dest) - print>>dest + print(file=dest) def _add_section(self, section, values, attrs): # "_Help" value in each section is its documentation string help = values.get("_Help", section) self.start_group(help, section) - for name, value in values.iteritems(): + for name, value in values.items(): if not name[0] == "_" and not attrs.get(name, {}).get("no_cmdline"): section_template = self._templated_sections.get((section, name)) if section_template: @@ -191,12 +191,12 @@ def _instantiate_section_template_callback(self, option, opt_str, value, parser, # store value in parser if parser is not None: setattr(parser.values, option.dest, value) - print>>log(2),"callback invoked for {}".format(value) + print("callback invoked for {}".format(value), file=log(2)) # get template contents if type(value) is str: value = value.split(",") elif type(value) is not list: - raise TypeError,"list or string expected for {}, got {}".format(opt_str, type(value)) + raise TypeError("list or string expected for {}, got {}".format(opt_str, type(value))) for num, label in enumerate(value): substitutions = dict(LABEL=label, NUM=num) # init values from templated section @@ -204,7 +204,7 
@@ def _instantiate_section_template_callback(self, option, opt_str, value, parser, # section name is templated section = values["_NameTemplate"].format(**substitutions).lower() if section in self._instantiated_sections: - print>> log(2), "section {} already exists".format(section) + print("section {} already exists".format(section), file=log(2)) continue # if section is already instatiated in the parset, update if section in self._defaults: @@ -224,7 +224,7 @@ def _instantiate_section_template_callback(self, option, opt_str, value, parser, values["_Templated"] = True attrs["_Templated"] = dict(no_cmdline=True, no_print=True) # add to parser - print>> log(2), "adding section {}".format(section) + print("adding section {}".format(section), file=log(2)) self._add_section(section, values, attrs) def _init_from_defaults(self): @@ -237,11 +237,11 @@ def _init_from_defaults(self): # split defaults into "regular" sections and "templated" sections normal_sections = [] - for section, values in self._defaults.iteritems(): + for section, values in self._defaults.items(): if '_NameTemplate' in values: match = re.match("^--(.+)-(.+)$", values["_ExpandedFrom"]) if not match: - raise ValueError,"Unrecognized _ExpandedFrom item in [{}]".format(section) + raise ValueError("Unrecognized _ExpandedFrom item in [{}]".format(section)) sec, var = match.groups() self._templated_sections[sec, var] = section else: diff --git a/cubical/tools/logger.py b/cubical/tools/logger.py index 3035468b..825479d1 100644 --- a/cubical/tools/logger.py +++ b/cubical/tools/logger.py @@ -5,10 +5,10 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet -from __future__ import print_function + import logging, logging.handlers, os, re, sys, multiprocessing -import ModColor +from . import ModColor # dict of logger wrappers created by the application _loggers = {} @@ -29,7 +29,7 @@ def logToFile(filename, append=False): _file_handler.setLevel(logging.DEBUG) _file_handler.setFormatter(_logfile_formatter) # set it as the target for the existing wrappers' handlers - for wrapper in _loggers.itervalues(): + for wrapper in _loggers.values(): wrapper.logfile_handler.setTarget(_file_handler) def getLogFilename(): diff --git a/cubical/tools/parsets.py b/cubical/tools/parsets.py index 507b056e..01ec13bd 100644 --- a/cubical/tools/parsets.py +++ b/cubical/tools/parsets.py @@ -6,7 +6,7 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet -import ConfigParser +import configparser from collections import OrderedDict import re @@ -141,9 +141,9 @@ def __init__(self, filename=None): def update_values (self, other, other_filename=''): """Updates this Parset with keys found in other parset. other_filename is only needed for error messages.""" - for secname, secvalues in other.value_dict.iteritems(): + for secname, secvalues in other.value_dict.items(): if secname in self.value_dict: - for name, value in secvalues.iteritems(): + for name, value in secvalues.items(): attrs = self.attr_dict[secname].get(name) if attrs is None: attrs = self.attr_dict[secname][name] = \ @@ -178,7 +178,7 @@ def read (self, filename, default_parset=False): section names are expanded. 
""" self.filename = filename - self.Config = config = ConfigParser.ConfigParser(dict_type=OrderedDict) + self.Config = config = configparser.ConfigParser(dict_type=OrderedDict) config.optionxform = str success = config.read(self.filename) self.success = bool(len(success)) @@ -223,9 +223,9 @@ def set (self, section, option, value): def write (self, f): """Writes the Parset out to a file object""" - for section, content in self.value_dict.iteritems(): + for section, content in self.value_dict.items(): f.write('[%s]\n'%section) - for option, value in content.iteritems(): + for option, value in content.items(): attrs = self.attr_dict.get(section, {}).get(option, {}) if option[0] != "_" and not attrs.get('cmdline_only') and not attrs.get('alias_of'): f.write('%s = %s \n'%(option, str(value))) diff --git a/cubical/tools/shared_dict.py b/cubical/tools/shared_dict.py index 7d89585a..badec967 100644 --- a/cubical/tools/shared_dict.py +++ b/cubical/tools/shared_dict.py @@ -6,8 +6,8 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet -import sys, os, os.path, cPickle, re -import NpShared +import sys, os, os.path, pickle, re +from . import NpShared import numpy as np import traceback import collections @@ -32,7 +32,7 @@ def create(name): def dict_to_shm(name, D): Ds=create(name) - for key in D.keys(): + for key in list(D.keys()): Ds[key]=D[key] return Ds @@ -65,7 +65,7 @@ def load(self): try: return self.load_impl() except: - print "Error loading item %s" % self.path + print("Error loading item %s" % self.path) traceback.print_exc() return SharedDict.ItemLoadError(path, sys.exc_info()) @@ -79,7 +79,7 @@ def load_impl(self): class PickleProxy(ItemProxy): def load_impl(self): - return cPickle.load(file(self.path)) + return pickle.load(file(self.path)) # this maps "class codes" parsed out of item filenames to appropriate item proxies. See reload() below _proxy_class_map = dict(a=SharedArrayProxy, d=SubdictProxy, p=PickleProxy) @@ -161,21 +161,21 @@ def reload(self): filepath = os.path.join(self.path, name) # each filename is composed as "key_type:name:value_type", e.g. 
"str:Data:a", where value_type # is looked up in _proxy_class_map to determine how to load the file - match = re.match("^(\w+):(.*):(%s)$" % "|".join(SharedDict._proxy_class_map.keys()), name) + match = re.match("^(\w+):(.*):(%s)$" % "|".join(list(SharedDict._proxy_class_map.keys())), name) if not match: - print "Can't parse shared dict entry " + filepath + print("Can't parse shared dict entry " + filepath) continue keytype, key, valuetype = match.groups() typefunc = _allowed_key_types.get(keytype) if typefunc is None: - print "Unknown shared dict key type "+keytype + print("Unknown shared dict key type "+keytype) continue key = typefunc(key) try: proxyclass = SharedDict._proxy_class_map[valuetype] dict.__setitem__(self, key, proxyclass(filepath)) except: - print "Error loading item %s"%name + print("Error loading item %s"%name) traceback.print_exc() pass @@ -237,7 +237,7 @@ def __setitem__(self, item, value): if not self._readwrite: raise RuntimeError("SharedDict %s attached as read-only" % self.path) if type(item).__name__ not in _allowed_key_types: - raise KeyError,"unsupported key of type "+type(item).__name__ + raise KeyError("unsupported key of type "+type(item).__name__) name = self._key_to_name(item) path = os.path.join(self.path, name) # remove previous item from SHM, if it's in the local dict @@ -259,12 +259,12 @@ def __setitem__(self, item, value): # for regular dicts, copy across elif isinstance(value, (dict, SharedDict, collections.OrderedDict)): dict1 = self.addSubdict(item) - for key1, value1 in value.iteritems(): + for key1, value1 in value.items(): dict1[key1] = value1 value = dict1 # all other types, just use pickle else: - cPickle.dump(value, file(path+'p', "w"), 2) + pickle.dump(value, file(path+'p', "w"), 2) dict.__setitem__(self, item, value) def addSubdict (self, item): @@ -303,10 +303,10 @@ def testSharedDict (): arr = subdict.addSharedArray("foo",(4, 4), np.float32) arr.fill(1) - print dic + print(dic) other_view = SharedDict("foo", reset=False) - print other_view + print(other_view) diff --git a/cubical/tools/shm_utils.py b/cubical/tools/shm_utils.py index 1f2e1177..ae455672 100644 --- a/cubical/tools/shm_utils.py +++ b/cubical/tools/shm_utils.py @@ -7,9 +7,9 @@ # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet import os, re, errno -import logger -import NpShared -import shared_dict +from . import logger +from . import NpShared +from . 
import shared_dict log = logger.getLogger("shm_utils") @@ -61,13 +61,13 @@ def cleanupStaleShm (): for pid in set([x[1] for x in shmlist]): try: os.kill(pid, 0) - except OSError, err: + except OSError as err: if err.errno == errno.ESRCH: dead_pids.add(pid) # ok, make list of candidates for deletion victims = [ filename for filename,pid in shmlist if pid in dead_pids ] if victims: - print>>log, "reaping %d shared memory objects associated with %d dead cubical processes"%(len(victims), len(dead_pids)) + print("reaping %d shared memory objects associated with %d dead cubical processes"%(len(victims), len(dead_pids)), file=log) dirs = [ v for v in victims if os.path.isdir(v) ] files = [ v for v in victims if not os.path.isdir(v) ] # rm -fr only works for a limited number of arguments (which the semaphore list can easily exceed) diff --git a/cubical/workers.py b/cubical/workers.py index 445c245b..f528a0a2 100644 --- a/cubical/workers.py +++ b/cubical/workers.py @@ -34,35 +34,35 @@ def _setup_workers_and_threads(force_serial, ncpu, nworkers, nthreads, montblanc cubical.kernels.num_omp_threads = nthreads if nthreads: nthreads = max(nthreads, montblanc_threads) - print>> log(0, "blue"), "forcing single-process mode, {} OMP and/or Montblanc threads".format(nthreads) + print("forcing single-process mode, {} OMP and/or Montblanc threads".format(nthreads), file=log(0, "blue")) elif montblanc_threads: nthreads = montblanc_threads - print>> log(0, "blue"), "forcing single-process mode, single thread{}".format(montblanc) + print("forcing single-process mode, single thread{}".format(montblanc), file=log(0, "blue")) return False, 0, nthreads if nworkers and nthreads: - print>> log(0, "blue"), "multi-process mode: --dist-nworker {} (+1), --dist-nthread {}{}".format(nworkers, nthreads, montblanc) + print("multi-process mode: --dist-nworker {} (+1), --dist-nthread {}{}".format(nworkers, nthreads, montblanc), file=log(0, "blue")) return True, nworkers, nthreads if ncpu: cores = ncpu - (montblanc_threads or 1) if not nworkers and not nthreads: - print>> log(0, "blue"), "multi-process mode: {}+1 workers, single thread{}".format(cores, montblanc) + print("multi-process mode: {}+1 workers, single thread{}".format(cores, montblanc), file=log(0, "blue")) return True, cores, 1 if nworkers: nthreads = max(1, cores // nworkers) - print>> log(0, "blue"), "multi-process mode: --dist-nworker {} (+1), {} OMP threads{}".format(nworkers, nthreads, montblanc) + print("multi-process mode: --dist-nworker {} (+1), {} OMP threads{}".format(nworkers, nthreads, montblanc), file=log(0, "blue")) return True, nworkers, nthreads if nthreads: nworkers = max(1, cores // nthreads) - print>> log(0, "blue"), "multi-process mode: {}+1 workers, --dist-nthread {}{}".format(nworkers, nthreads, montblanc) + print("multi-process mode: {}+1 workers, --dist-nthread {}{}".format(nworkers, nthreads, montblanc), file=log(0, "blue")) return True, nworkers, nthreads else: # ncpu not set, and nworkers/nthreads not both set if nworkers: - print>> log(0, "blue"), "multi-process mode: --dist-nworker {} (+1), single thread{}".format(nworkers, montblanc) + print("multi-process mode: --dist-nworker {} (+1), single thread{}".format(nworkers, montblanc), file=log(0, "blue")) return True, nworkers, 1 if nthreads: - print>> log(0, "blue"), "single-process mode: --dist-thread {}{}".format(nthreads, montblanc) + print("single-process mode: --dist-thread {}{}".format(nthreads, montblanc), file=log(0, "blue")) return False, 0, nthreads - print>> log(0, "blue"), 
"single-process, single-thread mode{}".format(montblanc) + print("single-process, single-thread mode{}".format(montblanc), file=log(0, "blue")) return False, 0, 0 raise RuntimeError("can't be here -- this is a bug!") @@ -121,7 +121,7 @@ def setup_parallelism(ncpu, nworker, nthread, force_serial, affinity, io_affinit core = int(affinity) corestep = 1 elif re.match("^(\d+):(\d+)$", affinity): - core, corestep = map(int, affinity.split(":")) + core, corestep = list(map(int, affinity.split(":"))) else: raise ValueError("invalid affinity setting '{}'".format(affinity)) else: @@ -135,11 +135,11 @@ def setup_parallelism(ncpu, nworker, nthread, force_serial, affinity, io_affinit # for now, since we can't figure out this tensorflow affinity shit if affinity is not None and io_affinity and use_montblanc: io_affinity = None - print>>log(0,"red"),"Montblanc currently does not support CPU affinity settings: ignoring --dist-pin-io" + print("Montblanc currently does not support CPU affinity settings: ignoring --dist-pin-io", file=log(0,"red")) if affinity is not None and io_affinity: num_io_cores = montblanc_threads if use_montblanc else 1 - io_cores = range(core,core+num_io_cores*corestep,corestep) + io_cores = list(range(core,core+num_io_cores*corestep,corestep)) core = core + num_io_cores * corestep # if Montblanc is in use, affinity controlled by GOMP setting, else by taskset if use_montblanc: @@ -163,14 +163,14 @@ def setup_parallelism(ncpu, nworker, nthread, force_serial, affinity, io_affinit core = core + corestep # create entries for subprocesses, and allocate cores - for icpu in xrange(1, num_workers + 1): + for icpu in range(1, num_workers + 1): name = "Process-{}".format(icpu + 1) props = worker_process_properties[name] = dict(label="x%02d" % icpu, num_omp_threads=nthread, environ={}) if affinity is not None: props["taskset"] = str(core) # if OMP is in use, set affinities via gomp if nthread: - worker_cores = range(core, core + nthread * corestep, corestep) + worker_cores = list(range(core, core + nthread * corestep, corestep)) core += nthread * corestep props["environ"]["GOMP_CPU_AFFINITY"] = " ".join(map(str, worker_cores)) else: @@ -235,8 +235,8 @@ def _run_multi_process_loop(ms, load_model, solver_type, solver_opts, debug_opts def reap_children(): pid, status, _ = os.wait3(os.WNOHANG) if pid: - print>>log(0,"red"),"child process {} exited with status {}. This is a bug, or an out-of-memory condition.".format(pid, status) - print>>log(0,"red"),"This error is not recoverable: the main process will now commit ritual harakiri." + print("child process {} exited with status {}. 
This is a bug, or an out-of-memory condition.".format(pid, status), file=log(0,"red")) + print("This error is not recoverable: the main process will now commit ritual harakiri.", file=log(0,"red")) os._exit(1) raise RuntimeError("child process {} exited with status {}".format(pid, status)) @@ -257,7 +257,7 @@ def reap_children(): for itile, tile in enumerate(tile_list): # wait for I/O job on current tile to finish - print>> log(0), "waiting for I/O on {}".format(tile.label) + print("waiting for I/O on {}".format(tile.label), file=log(0)) # have a timeout so that if a child process dies, we at least find out done = False while not done: @@ -279,24 +279,24 @@ def reap_children(): # submit solver jobs solver_futures = {} - print>> log(0), "submitting solver jobs for {}".format(tile.label) + print("submitting solver jobs for {}".format(tile.label), file=log(0)) for key in tile.get_chunk_keys(): solver_futures[executor.submit(solver.run_solver, solver_type, itile, key, solver_opts, debug_opts)] = key - print>> log(3), "submitted solver job for chunk {}".format(key) + print("submitted solver job for chunk {}".format(key), file=log(3)) # wait for solvers to finish while solver_futures: reap_children() - done, not_done = cf.wait(solver_futures.keys(), timeout=1) + done, not_done = cf.wait(list(solver_futures.keys()), timeout=1) for future in done: key = solver_futures[future] stats = future.result() stats_dict[tile.get_chunk_indices(key)] = stats - print>> log(3), "handled result of chunk {}".format(key) + print("handled result of chunk {}".format(key), file=log(3)) del solver_futures[future] - print>> log(0), "finished processing {}".format(tile.label) + print("finished processing {}".format(tile.label), file=log(0)) # ok, at this stage we've iterated over all the tiles, but there's an outstanding # I/O job saving the second-to-last tile (which was submitted with itile+1), and the last tile was @@ -351,11 +351,11 @@ def _run_single_process_loop(ms, load_model, single_chunk, solver_type, solver_o solver.gm_factory.save_solutions(sd) solver.ifrgain_machine.accumulate(sd) else: - print>> log(0), " single-chunk {} not in this tile, skipping it.".format(single_chunk) + print(" single-chunk {} not in this tile, skipping it.".format(single_chunk), file=log(0)) tile.release() # break out after single chunk is processed if processed and single_chunk: - print>> log(0, "red"), "single-chunk {} was processed in this tile. Will now finish".format(single_chunk) + print("single-chunk {} was processed in this tile. Will now finish".format(single_chunk), file=log(0, "red")) break solver.ifrgain_machine.save() solver.gm_factory.set_metas(ms) @@ -378,8 +378,8 @@ def _init_worker(main=False): name = multiprocessing.current_process().name if name not in worker_process_properties: - print>> log(0, "red"), "WARNING: unrecognized worker process name '{}'. " \ - "Please inform the developers.".format(name) + print("WARNING: unrecognized worker process name '{}'. 
" \ + "Please inform the developers.".format(name), file=log(0, "red")) return props = worker_process_properties[name] @@ -389,18 +389,18 @@ def _init_worker(main=False): taskset = props.get("taskset") if taskset is not None: - print>>log(1,"blue"),"pid {}, setting CPU affinity to {} with taskset".format(os.getpid(), taskset) + print("pid {}, setting CPU affinity to {} with taskset".format(os.getpid(), taskset), file=log(1,"blue")) os.system("taskset -pc {} {} >/dev/null".format(taskset, os.getpid())) environ = props.get("environ") if environ: os.environ.update(environ) - for key, value in environ.iteritems(): - print>>log(1,"blue"),"setting {}={}".format(key, value) + for key, value in environ.items(): + print("setting {}={}".format(key, value), file=log(1,"blue")) num_omp_threads = props.get("num_omp_threads") if num_omp_threads is not None: - print>> log(1,"blue"), "enabling {} OMP threads".format(num_omp_threads) + print("enabling {} OMP threads".format(num_omp_threads), file=log(1,"blue")) import cubical.kernels cubical.kernels.num_omp_threads = num_omp_threads @@ -429,8 +429,8 @@ def _io_handler(save=None, load=None, load_model=True, finalize=False): result = {'success': True} if save is not None: tile = tile_list[save] - itile = range(len(tile_list))[save] - print>>log(0, "blue"),"saving {}".format(tile.label) + itile = list(range(len(tile_list)))[save] + print("saving {}".format(tile.label), file=log(0, "blue")) tile.save(final=finalize) for sd in tile.iterate_solution_chunks(): solver.gm_factory.save_solutions(sd) @@ -445,12 +445,12 @@ def _io_handler(save=None, load=None, load_model=True, finalize=False): tile.release() if load is not None: tile = tile_list[load] - print>>log(0, "blue"),"loading {}".format(tile.label) + print("loading {}".format(tile.label), file=log(0, "blue")) tile.load(load_model=load_model) - print>> log(0, "blue"), "I/O job(s) complete" + print("I/O job(s) complete", file=log(0, "blue")) return result - except Exception, exc: - print>> log(0, "red"),"I/O handler for load {} save {} failed with exception: {}".format(load, save, exc) - print>> log, traceback.format_exc() + except Exception as exc: + print("I/O handler for load {} save {} failed with exception: {}".format(load, save, exc), file=log(0, "red")) + print(traceback.format_exc(), file=log) raise diff --git a/test/benchmark/kernel_timings.py b/test/benchmark/kernel_timings.py index 30398225..efc8e5f3 100644 --- a/test/benchmark/kernel_timings.py +++ b/test/benchmark/kernel_timings.py @@ -20,7 +20,7 @@ def conj2x2(x,y): def reroll_array(arr, axes): """Returns array where the axes are stored in a specific order""" - axes = axes or range(len(arr.shape)) + axes = axes or list(range(len(arr.shape))) realarray = arr.transpose(axes).copy(order='C') return realarray.transpose(np.argsort(axes)) @@ -62,7 +62,7 @@ def __init__(self, nd=10, nm=1, nt=60, nf=32, na=28, t_int=1, f_int=1, self.p = np.zeros(self._pshape, ptype) self._kernel_name = refkern.__name__ self.na = na - self.baselines = [(p, q) for p in xrange(self.na) for q in xrange(self.na) if p < q] + self.baselines = [(p, q) for p in range(self.na) for q in range(self.na) if p < q] if allocate: for p, q in self.baselines: if diagmodel: @@ -74,7 +74,7 @@ def __init__(self, nd=10, nm=1, nt=60, nf=32, na=28, t_int=1, f_int=1, fillrand(self.m[..., p, q, :, :]) conj2x2(self.o[..., q, p, :, :], self.o[..., p, q, :, :]) conj2x2(self.m[..., q, p, :, :], self.m[..., p, q, :, :]) - for p in xrange(self.na): + for p in range(self.na): if diaggain: for c in 0,1: 
fillrand(self.g[..., p, c, c]) @@ -84,7 +84,7 @@ def __init__(self, nd=10, nm=1, nt=60, nf=32, na=28, t_int=1, f_int=1, def fillrest(self): self.gh = np.zeros_like(self.g) - for p in xrange(self.na): + for p in range(self.na): conj2x2(self.gh[..., p, :, :], self.g[..., p, :, :]) self.jh = np.zeros_like(self.m) self.jhr = np.zeros(self._jhrshape, self._ptype) @@ -93,9 +93,9 @@ def fillrest(self): self.corr = np.zeros_like(self.o) def printshapes(self): - for name, value in self.__dict__.iteritems(): + for name, value in self.__dict__.items(): if type(value) is np.ndarray: - print(" .{}: {}".format(name, value.shape)) + print((" .{}: {}".format(name, value.shape))) class OrderedArrays(UnorderedArrays): """Creates a set of test arrays for testing the cubical kernels. @@ -115,7 +115,7 @@ def __init__(self, other, kernel, pshape=None, jhrshape=None): np.copyto(getattr(self, arr), getattr(other, arr)) # populate derived arrays self.fillrest() - print "Array shapes are:" + print("Array shapes are:") self.printshapes() @@ -131,7 +131,7 @@ def __exit__(self, type, value, traceback): def benchmark(code, name, n=3): res = timeit.repeat(code, repeat=n, number=1) - print "{:70}: {:.2f}ms (best of {})".format(name, min(res)*1000, n) + print("{:70}: {:.2f}ms (best of {})".format(name, min(res)*1000, n)) def benchmark_all(module, function_name, arguments, setup=None, check=None, notes=''): modname = module.__name__.split('.')[-1] @@ -141,7 +141,7 @@ def benchmark_all(module, function_name, arguments, setup=None, check=None, note setup() benchmark(lambda:getattr(module, funcname)(*arguments), "{}.{} ({})".format(modname, funcname, notes)) if check is not None and not check(): - print "*** FAIL ***" + print("*** FAIL ***") global nfailed nfailed += 1 @@ -179,8 +179,8 @@ def benchmark_all(module, function_name, arguments, setup=None, check=None, note refkern = cubical.kernels.import_kernel(refkern_name) testkerns = [ kernels[name] for name in kernel_names ] - print "\n### Reference kernel:", refkern_name - print "### Test kernels:"," ".join(kernel_names) + print("\n### Reference kernel:", refkern_name) + print("### Test kernels:"," ".join(kernel_names)) nt, nf, na = args.nt, args.nf, args.na, @@ -189,8 +189,8 @@ def benchmark_all(module, function_name, arguments, setup=None, check=None, note THREADS = [1] if not args.omp else [1, args.omp] NDIRS = [1] if not args.nd else args.nd - print "### {} threads, {} dirs, {} times, {} freqs, {} antennas, intervals {} {}\n".format(THREADS,NDIRS,nt,nf,na,t_int,f_int) - print "### ordered memory layout determined by {} kernel".format(kernel_names[0]) + print("### {} threads, {} dirs, {} times, {} freqs, {} antennas, intervals {} {}\n".format(THREADS,NDIRS,nt,nf,na,t_int,f_int)) + print("### ordered memory layout determined by {} kernel".format(kernel_names[0])) def benchmark_function(function, arguments, setup=None, check=None): for kern in testkerns: @@ -232,7 +232,7 @@ def benchmark_function(function, arguments, setup=None, check=None): diagmodel=args.diagmodel, kernel=refkern) o = OrderedArrays(u, testkerns[0], pshape=[nparm,2,2], jhrshape=[nb,2,2]) - print "\n### Testing {} directions, model shape is {}\n".format(nd, u.m.shape) + print("\n### Testing {} directions, model shape is {}\n".format(nd, u.m.shape)) print('*** RES') @@ -332,7 +332,7 @@ def benchmark_function(function, arguments, setup=None, check=None): diagmodel=args.diagmodel, kernel=refkern) o = OrderedArrays(u, testkerns[0]) - print "\n### Testing {} directions, model shape is {}\n".format(nd, u.m.shape) 
+    print("\n### Testing {} directions, model shape is {}\n".format(nd, u.m.shape))

     print('*** RES')

diff --git a/test/d147_test.py b/test/d147_test.py
index a9962e2c..dfb2aaba 100644
--- a/test/d147_test.py
+++ b/test/d147_test.py
@@ -11,14 +11,14 @@ def kw_to_args(**kw):
         cmd = "--sol-jones {} ".format(kw.pop("sol_jones"))
     else:
         cmd = ""
-    cmd += " ".join(["--{} {}".format(name.replace("_", "-"), value) for name, value in kw.items()])
+    cmd += " ".join(["--{} {}".format(name.replace("_", "-"), value) for name, value in list(kw.items())])
     return cmd

 basedir = os.path.dirname(__file__)

 def logprint(arg):
-    print>>sys.stderr,arg
+    print(arg, file=sys.stderr)

 class SolverVerification(object):
     def __init__(self, msname, refmsname, parset, workdir="."):
diff --git a/test/kernels_test.py b/test/kernels_test.py
index 4e2d0ea3..31019fc8 100644
--- a/test/kernels_test.py
+++ b/test/kernels_test.py
@@ -1,7 +1,7 @@
 import os, sys, os.path

 def logprint(arg):
-    print>>sys.stderr,arg
+    print(arg, file=sys.stderr)

 def kernels_test():


From 6b0e1a59e1b022c8cd7f6cb7c857b8169efb5a5a Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Wed, 15 May 2019 12:00:26 +0200
Subject: [PATCH 02/25] ignore virtualenv stuff

---
 .gitignore | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index cba78ef5..031c9b95 100644
--- a/.gitignore
+++ b/.gitignore
@@ -88,4 +88,8 @@ docs/_templates/
 *.orig

 # databases
-*_db
\ No newline at end of file
+*_db
+
+# virtualenvs
+.venv*/
+.virtualenv*/

From 4a94c61e0d59c1d111173e166208cb547945a9af Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Wed, 15 May 2019 14:39:15 +0200
Subject: [PATCH 03/25] configparser is stricter

---
 cubical/DefaultParset.cfg | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/cubical/DefaultParset.cfg b/cubical/DefaultParset.cfg
index 9bae4af6..99beddd7 100755
--- a/cubical/DefaultParset.cfg
+++ b/cubical/DefaultParset.cfg
@@ -193,7 +193,7 @@ flag-ant-thr = 5          # Threshold (in sigmas) used to flag bad antenna
 [sol]
 _Help = Solution options which apply at the solver level
 jones = G                 # Comma-separated list of Jones terms to enable, e.g. "G,B,dE"
-                            (default: %default)
+                            (default: default)
 precision = 32            # Solve in single or double precision #options:32|64
 delta-g = 1e-6            # Theshold for gain accuracy - gains which improve by less
                             than this value are considered converged. DEPRECATED FOR PER-JONES epsilon OPTION.
@@ -350,8 +350,6 @@ prop-flags = default      # Flag propagation policy. Determines how flags
                             #options:never|always|default
 estimate-pzd = 0          # Estimate phase-zero difference and initialize the gains with it. Use for
                             polarization calibration. #type:bool
-estimate-pzd = 0          # Estimate phase-zero difference and initialize the gains with it.
-                            Use for polarization calibration. #type:bool
 diag-only = 0             # Use only diagonal (parallel-hand) data and model terms for
                             the solution. Note that gains are still applied to the full
                             2x2 data (unless --sel-diag is also set). #type:bool
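A note on the parset change above: under Python 3 the default ConfigParser runs "%"-interpolation on every value it returns, so the literal "%default" in DefaultParset.cfg is no longer legal. A minimal sketch of the behaviour, with toy section and option names rather than CubiCal's actual parset machinery:

    import configparser

    cfg_text = "[sol]\njones = G (default: %default)\n"

    parser = configparser.ConfigParser()
    parser.read_string(cfg_text)
    try:
        parser.get("sol", "jones")            # interpolation happens on retrieval
    except configparser.InterpolationSyntaxError as exc:
        print("strict configparser:", exc)    # '%' must be followed by '%' or '('

    # Escaping the percent sign as "%%default" would also work, as would
    # RawConfigParser, which skips interpolation entirely:
    raw = configparser.RawConfigParser()
    raw.read_string(cfg_text)
    print(raw.get("sol", "jones"))            # -> G (default: %default)

Dropping the "%" is the smallest fix available, since the help text only needs to read sensibly.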
From b377357d3481b4511e90eb595fd1933bfc8901dc Mon Sep 17 00:00:00 2001
From: Gijs Molenaar
Date: Wed, 15 May 2019 14:39:37 +0200
Subject: [PATCH 04/25] fix print syntax

---
 cubical/data_handler/ms_data_handler.py        | 1 +
 cubical/data_handler/ms_tile.py                | 1 +
 cubical/database/iface_database.py             | 5 ++++-
 cubical/database/parameter.py                  | 2 +-
 cubical/database/pickled_db.py                 | 2 +-
 cubical/flagging.py                            | 1 +
 cubical/machines/abstract_machine.py           | 6 +++++-
 cubical/machines/complex_2x2_machine.py        | 1 +
 cubical/machines/complex_W_2x2_machine.py      | 1 +
 cubical/machines/ifr_gain_machine.py           | 1 +
 cubical/machines/interval_gain_machine.py      | 2 +-
 cubical/machines/jones_chain_machine.py        | 1 +
 cubical/machines/jones_chain_robust_machine.py | 1 +
 cubical/machines/parallactic_machine.py        | 2 +-
 cubical/machines/phase_diag_machine.py         | 2 +-
 cubical/madmax/flagger.py                      | 5 +++--
 cubical/madmax/plots.py                        | 1 +
 cubical/main.py                                | 2 +-
 cubical/plots/ifrgains.py                      | 6 ++++--
 cubical/plots/stats.py                         | 2 +-
 cubical/solver.py                              | 4 ++--
 cubical/statistics.py                          | 2 +-
 cubical/tools/ClassPrint.py                    | 2 +-
 cubical/tools/NpShared.py                      | 1 +
 cubical/tools/dynoptparse.py                   | 7 ++++---
 cubical/tools/logger.py                        | 4 ++--
 cubical/tools/shm_utils.py                     | 2 +-
 cubical/workers.py                             | 1 +
 setup.py                                       | 3 ++-
 test/benchmark/kernel_timings.py               | 6 ++++--
 test/d147_test.py                              | 2 +-
 test/kernels_test.py                           | 1 +
 32 files changed, 53 insertions(+), 27 deletions(-)

diff --git a/cubical/data_handler/ms_data_handler.py b/cubical/data_handler/ms_data_handler.py
index ec4ce2f8..26457985 100644
--- a/cubical/data_handler/ms_data_handler.py
+++ b/cubical/data_handler/ms_data_handler.py
@@ -2,6 +2,7 @@
 # (c) 2017 Rhodes University & Jonathan S. Kenyon
 # http://github.com/ratt-ru/CubiCal
 # This code is distributed under the terms of GPLv2, see LICENSE.md for details
+from __future__ import print_function
 import numpy as np
 from collections import OrderedDict
 import pyrap.tables as pt
diff --git a/cubical/data_handler/ms_tile.py b/cubical/data_handler/ms_tile.py
index 6c27c6c2..6ca5732d 100644
--- a/cubical/data_handler/ms_tile.py
+++ b/cubical/data_handler/ms_tile.py
@@ -2,6 +2,7 @@
 # (c) 2017 Rhodes University & Jonathan S. Kenyon
 # http://github.com/ratt-ru/CubiCal
 # This code is distributed under the terms of GPLv2, see LICENSE.md for details
+from __future__ import print_function
 import numpy as np
 from collections import OrderedDict
 import traceback
diff --git a/cubical/database/iface_database.py b/cubical/database/iface_database.py
index 6d1b80e7..94bbdc4e 100644
--- a/cubical/database/iface_database.py
+++ b/cubical/database/iface_database.py
@@ -5,9 +5,12 @@
 """
 Defines database interface
 """
+from six import add_metaclass
 import abc

-class iface_database(object, metaclass=abc.ABCMeta):
+
+@add_metaclass(abc.ABCMeta)
+class iface_database:
     @abc.abstractmethod
     def __init__(self):
         raise NotImplementedError("To be defined")
diff --git a/cubical/database/parameter.py b/cubical/database/parameter.py
index 2f5f1e20..1370db68 100644
--- a/cubical/database/parameter.py
+++ b/cubical/database/parameter.py
@@ -5,7 +5,7 @@
 """
 Handles parameter databases which can contain solutions and other relevant values.
""" - +from __future__ import print_function import numpy as np from numpy.ma import masked_array from cubical.tools import logger diff --git a/cubical/database/pickled_db.py b/cubical/database/pickled_db.py index c1414759..0aec467b 100644 --- a/cubical/database/pickled_db.py +++ b/cubical/database/pickled_db.py @@ -5,7 +5,7 @@ """ Handles parameter databases which can contain solutions and other relevant values. """ - +from __future__ import print_function import pickle, os, os.path import numpy as np import traceback diff --git a/cubical/flagging.py b/cubical/flagging.py index 3b8028cf..40b8c942 100644 --- a/cubical/flagging.py +++ b/cubical/flagging.py @@ -8,6 +8,7 @@ # This is to keep matplotlib from falling over when no DISPLAY is set (which it otherwise does, # even if one is only trying to save figures to .png. +from __future__ import print_function import numpy as np import re diff --git a/cubical/machines/abstract_machine.py b/cubical/machines/abstract_machine.py index 4bf8ed98..ae5eaeef 100644 --- a/cubical/machines/abstract_machine.py +++ b/cubical/machines/abstract_machine.py @@ -2,6 +2,8 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details +from __future__ import print_function +from six import add_metaclass from abc import ABCMeta, abstractmethod, abstractproperty import numpy as np from numpy.ma import masked_array @@ -15,7 +17,9 @@ log = logger.getLogger("gain_machine") -class MasterMachine(object, metaclass=ABCMeta): + +@add_metaclass(ABCMeta) +class MasterMachine: """ This is a base class for all solution machines. It is completely generic and lays out the basic requirements for all machines. diff --git a/cubical/machines/complex_2x2_machine.py b/cubical/machines/complex_2x2_machine.py index d6dc5c10..379113b7 100644 --- a/cubical/machines/complex_2x2_machine.py +++ b/cubical/machines/complex_2x2_machine.py @@ -2,6 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details +from __future__ import print_function from cubical.machines.interval_gain_machine import PerIntervalGains import numpy as np from cubical.flagging import FL diff --git a/cubical/machines/complex_W_2x2_machine.py b/cubical/machines/complex_W_2x2_machine.py index f857e198..54c73fa9 100644 --- a/cubical/machines/complex_W_2x2_machine.py +++ b/cubical/machines/complex_W_2x2_machine.py @@ -2,6 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details +from __future__ import print_function from cubical.machines.interval_gain_machine import PerIntervalGains import numpy as np from scipy import special diff --git a/cubical/machines/ifr_gain_machine.py b/cubical/machines/ifr_gain_machine.py index 3e8530e3..a43d80ff 100644 --- a/cubical/machines/ifr_gain_machine.py +++ b/cubical/machines/ifr_gain_machine.py @@ -2,6 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. 
Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details +from __future__ import print_function import numpy as np from numpy.ma import masked_array diff --git a/cubical/machines/interval_gain_machine.py b/cubical/machines/interval_gain_machine.py index f199db47..1563824e 100644 --- a/cubical/machines/interval_gain_machine.py +++ b/cubical/machines/interval_gain_machine.py @@ -2,7 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details - +from __future__ import print_function import numpy as np from cubical.flagging import FL from cubical.machines.abstract_machine import MasterMachine diff --git a/cubical/machines/jones_chain_machine.py b/cubical/machines/jones_chain_machine.py index ecd7966e..1a50161a 100644 --- a/cubical/machines/jones_chain_machine.py +++ b/cubical/machines/jones_chain_machine.py @@ -2,6 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details +from __future__ import print_function from cubical.machines.abstract_machine import MasterMachine from cubical.machines.complex_2x2_machine import Complex2x2Gains from cubical.machines.complex_W_2x2_machine import ComplexW2x2Gains diff --git a/cubical/machines/jones_chain_robust_machine.py b/cubical/machines/jones_chain_robust_machine.py index 18956d50..73b4cd5e 100644 --- a/cubical/machines/jones_chain_robust_machine.py +++ b/cubical/machines/jones_chain_robust_machine.py @@ -2,6 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details +from __future__ import print_function from cubical.machines.abstract_machine import MasterMachine from cubical.machines.complex_W_2x2_machine import ComplexW2x2Gains import numpy as np diff --git a/cubical/machines/parallactic_machine.py b/cubical/machines/parallactic_machine.py index 05099ebc..8e0103d9 100644 --- a/cubical/machines/parallactic_machine.py +++ b/cubical/machines/parallactic_machine.py @@ -1,5 +1,5 @@ - +from __future__ import print_function import pyrap.quanta as pq import pyrap.measures pm = pyrap.measures.measures() diff --git a/cubical/machines/phase_diag_machine.py b/cubical/machines/phase_diag_machine.py index 86da9dbf..e73b62e5 100644 --- a/cubical/machines/phase_diag_machine.py +++ b/cubical/machines/phase_diag_machine.py @@ -2,7 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. 
Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details - +from __future__ import print_function from cubical.machines.interval_gain_machine import PerIntervalGains import numpy as np from cubical.flagging import FL diff --git a/cubical/madmax/flagger.py b/cubical/madmax/flagger.py index d7f3e9cd..3a8f84b1 100644 --- a/cubical/madmax/flagger.py +++ b/cubical/madmax/flagger.py @@ -1,3 +1,4 @@ +from __future__ import print_function import numpy as np import os, os.path import traceback @@ -108,7 +109,7 @@ def get_plot_filename(self, kind=''): self._plotnum += 1 return filename - @profile + @builtins.profile def report_carnage(self, absres, mad, baddies, flags_arr, method, max_label): made_plots = False n_tim, n_fre, n_ant, n_ant = baddies.shape @@ -187,7 +188,7 @@ def report_carnage(self, absres, mad, baddies, flags_arr, method, max_label): return made_plots, nbad>0 - @profile + @builtins.profile def beyond_thunderdome(self, resid_arr, data_arr, model_arr, flags_arr, threshold, med_threshold, max_label): """This function implements MAD-based flagging on residuals""" if not threshold and not med_threshold: diff --git a/cubical/madmax/plots.py b/cubical/madmax/plots.py index e7e47f15..3b16b6c4 100644 --- a/cubical/madmax/plots.py +++ b/cubical/madmax/plots.py @@ -1,3 +1,4 @@ +from __future__ import print_function import numpy as np import traceback diff --git a/cubical/main.py b/cubical/main.py index 08bb7f4c..dfd3ad3d 100644 --- a/cubical/main.py +++ b/cubical/main.py @@ -13,7 +13,7 @@ # logging.root.removeHandler(handler) # logging.getLogger('vext').setLevel(logging.WARNING) ## - +from __future__ import print_function import pickle import os, os.path import sys diff --git a/cubical/plots/ifrgains.py b/cubical/plots/ifrgains.py index 211b4536..49c50b3f 100644 --- a/cubical/plots/ifrgains.py +++ b/cubical/plots/ifrgains.py @@ -1,8 +1,10 @@ +from __future__ import print_function import math,cmath import numpy as np import numpy.ma as ma from cubical.tools import logger log = logger.getLogger("plots") +from past.builtins import cmp from cubical.plots import DPI, ZOOM, make_antenna_xaxis @@ -154,8 +156,8 @@ def plot_complex(content, title): # for l1,l2,(x,xe),(y,ye) in content ]) minre, maxre, minim, maxim = 2, -2, 2, -2 for l1, l2, (x, xe), (y, ye) in content: - offs = np.array([getattr(v, attr) + sign * e / 4 for v, e in (x, xe), (y, ye) - for attr in 'real', 'imag' for sign in 1, -1]) + offs = np.array([getattr(v, attr) + sign * e / 4 for v, e in ((x, xe), (y, ye)) + for attr in ('real', 'imag') for sign in (1, -1)]) minre, maxre = min(x.real - xe / 4, y.real - ye / 4, minre), max(x.real + xe / 4, y.real + ye / 4, maxre) minim, maxim = min(x.imag - xe / 4, y.imag - ye / 4, minim), max(x.imag + xe / 4, y.imag + ye / 4, maxim) # plot labels diff --git a/cubical/plots/stats.py b/cubical/plots/stats.py index 799fd361..4b1fdf29 100644 --- a/cubical/plots/stats.py +++ b/cubical/plots/stats.py @@ -5,7 +5,7 @@ """ Creates summary plots using the solver stats. """ - +from __future__ import print_function import numpy as np from cubical.tools import logger log = logger.getLogger("plots") diff --git a/cubical/solver.py b/cubical/solver.py index e426ba61..d72a5096 100644 --- a/cubical/solver.py +++ b/cubical/solver.py @@ -5,7 +5,7 @@ """ Implements the solver loop. 
""" - +from __future__ import print_function import numpy as np import os, os.path import traceback @@ -49,7 +49,7 @@ def profile(func): return func builtins.profile = profile -@profile +@builtins.profile def _solve_gains(gm, stats, madmax, obser_arr, model_arr, flags_arr, sol_opts, label="", compute_residuals=None): """ Main body of the GN/LM method. Handles iterations and convergence tests. diff --git a/cubical/statistics.py b/cubical/statistics.py index 595f2959..3a6383be 100644 --- a/cubical/statistics.py +++ b/cubical/statistics.py @@ -5,7 +5,7 @@ """ Handles solver statistics. """ - +from __future__ import print_function import math import numpy as np import pickle diff --git a/cubical/tools/ClassPrint.py b/cubical/tools/ClassPrint.py index 9d5abce7..d10b85e7 100644 --- a/cubical/tools/ClassPrint.py +++ b/cubical/tools/ClassPrint.py @@ -5,7 +5,7 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet - +from __future__ import print_function import os import sys from . import ModColor diff --git a/cubical/tools/NpShared.py b/cubical/tools/NpShared.py index 52a89cab..e7159eab 100644 --- a/cubical/tools/NpShared.py +++ b/cubical/tools/NpShared.py @@ -8,6 +8,7 @@ # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet #import sharedarray.SharedArray as SharedArray +from __future__ import print_function import SharedArray from . import ModColor from . import logger diff --git a/cubical/tools/dynoptparse.py b/cubical/tools/dynoptparse.py index cc82018d..af9fc072 100644 --- a/cubical/tools/dynoptparse.py +++ b/cubical/tools/dynoptparse.py @@ -5,7 +5,8 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet - +from __future__ import print_function +from six import string_types import sys, re, optparse from collections import OrderedDict @@ -129,7 +130,7 @@ def read_input(self): group, name = self._parse_dest_key(key) group_dict = self._defaults[group] attrs = self._attributes.get(group, {}).get(name, {}) - if type(value) is str: + if isinstance(value, string_types): value, _ = parsets.parse_config_string(value, name=name, extended=False, type=attrs.get('type')) group_dict[name] = value alias = attrs.get('alias') or attrs.get('alias_of') @@ -193,7 +194,7 @@ def _instantiate_section_template_callback(self, option, opt_str, value, parser, setattr(parser.values, option.dest, value) print("callback invoked for {}".format(value), file=log(2)) # get template contents - if type(value) is str: + if isinstance(value, string_types): value = value.split(",") elif type(value) is not list: raise TypeError("list or string expected for {}, got {}".format(opt_str, type(value))) diff --git a/cubical/tools/logger.py b/cubical/tools/logger.py index 825479d1..757dcdf1 100644 --- a/cubical/tools/logger.py +++ b/cubical/tools/logger.py @@ -6,7 +6,7 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet - +from __future__ import print_function import logging, logging.handlers, os, re, sys, multiprocessing from . 
import ModColor @@ -270,7 +270,7 @@ def init(app_name): global _app_name global _root_logger if _root_logger is None: - logging.basicConfig(level=logging.DEBUG, fmt=_fmt, datefmt=_datefmt) + logging.basicConfig(level=logging.DEBUG, format=_fmt, datefmt=_datefmt) _app_name = app_name _root_logger = logging.getLogger(app_name) _root_logger.setLevel(logging.DEBUG) diff --git a/cubical/tools/shm_utils.py b/cubical/tools/shm_utils.py index ae455672..019ba3eb 100644 --- a/cubical/tools/shm_utils.py +++ b/cubical/tools/shm_utils.py @@ -5,7 +5,7 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet - +from __future__ import print_function import os, re, errno from . import logger from . import NpShared diff --git a/cubical/workers.py b/cubical/workers.py index f528a0a2..4bdb1bac 100644 --- a/cubical/workers.py +++ b/cubical/workers.py @@ -1,3 +1,4 @@ +from __future__ import print_function import multiprocessing, os, sys, traceback import concurrent.futures as cf import re diff --git a/setup.py b/setup.py index ce33d466..ea8b8972 100644 --- a/setup.py +++ b/setup.py @@ -133,7 +133,8 @@ def run(self): 'matplotlib', 'scipy'] else: - requirements = ['numpy', + requirements = ['future', + 'numpy', 'futures', 'python-casacore>=2.1.2', 'sharedarray', diff --git a/test/benchmark/kernel_timings.py b/test/benchmark/kernel_timings.py index efc8e5f3..591c63c0 100644 --- a/test/benchmark/kernel_timings.py +++ b/test/benchmark/kernel_timings.py @@ -1,3 +1,5 @@ +from __future__ import print_function +from __future__ import division import numpy as np import timeit import numpy.random @@ -39,8 +41,8 @@ def __init__(self, nd=10, nm=1, nt=60, nf=32, na=28, t_int=1, f_int=1, self.m = np.zeros((nd,nm,nt,nf,na,na,2,2), dtype) self.r = np.zeros((nm,nt,nf,na,na,2,2), dtype) # intervals? - nt1 = nt/t_int + (1 if nt%t_int else 0) - nf1 = nf/f_int + (1 if nf%f_int else 0) + nt1 = nt//t_int + (1 if nt%t_int else 0) + nf1 = nf//f_int + (1 if nf%f_int else 0) self._intshape = [nd,nt1,nf1,na,2,2] self._fullshape = [nd,nt,nf,na,2,2] self._paramgain = (pshape is not None) diff --git a/test/d147_test.py b/test/d147_test.py index dfb2aaba..733e48eb 100644 --- a/test/d147_test.py +++ b/test/d147_test.py @@ -1,5 +1,5 @@ #!/usr/bin/python - +from __future__ import print_function import os, os.path, sys from casacore.tables import table import numpy as np diff --git a/test/kernels_test.py b/test/kernels_test.py index 31019fc8..5b251b60 100644 --- a/test/kernels_test.py +++ b/test/kernels_test.py @@ -1,3 +1,4 @@ +from __future__ import print_function import os, sys, os.path def logprint(arg): From 9e4ba9146c600dc01fe429769e042dbc6ff77ea3 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Wed, 15 May 2019 15:04:40 +0200 Subject: [PATCH 05/25] porting --- cubical/data_handler/TiggerSourceProvider.py | 4 ++-- cubical/data_handler/ms_data_handler.py | 13 ++++++------- cubical/database/pickled_db.py | 11 +++++++---- cubical/flagging.py | 4 +++- cubical/main.py | 9 +++++---- cubical/tools/logger.py | 9 +++++---- cubical/tools/shared_dict.py | 4 ++-- 7 files changed, 30 insertions(+), 24 deletions(-) diff --git a/cubical/data_handler/TiggerSourceProvider.py b/cubical/data_handler/TiggerSourceProvider.py index d92663bf..18b30a53 100644 --- a/cubical/data_handler/TiggerSourceProvider.py +++ b/cubical/data_handler/TiggerSourceProvider.py @@ -5,7 +5,7 @@ """ Source provider for reading source information from a Tigger lsm. 
""" - +from six import string_types import logging import numpy as np @@ -241,7 +241,7 @@ def cluster_sources(sm, dde_tag): if dde_tag: tagvalue = src.getTag(dde_tag) if tagvalue: - if type(tagvalue) is str: + if isinstance(tagvalue, string_types): dde_cluster = tagvalue else: dde_cluster = src.getTag('cluster') diff --git a/cubical/data_handler/ms_data_handler.py b/cubical/data_handler/ms_data_handler.py index 26457985..5275f3a8 100644 --- a/cubical/data_handler/ms_data_handler.py +++ b/cubical/data_handler/ms_data_handler.py @@ -3,6 +3,7 @@ # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details from __future__ import print_function +from six import string_types import numpy as np from collections import OrderedDict import pyrap.tables as pt @@ -124,7 +125,7 @@ def _parse_bin(binspec, units, default_int=None, default_float=None, kind='bin') return default_int, default_float elif type(binspec) is int: return binspec, default_float - elif type(binspec) is str: + elif isinstance(binspec, string_types): for unit, multiplier in list(units.items()): if binspec.endswith(unit) and len(binspec) > len(unit): xval = binspec[:-len(unit)] @@ -935,7 +936,7 @@ def define_chunk(self, chunk_time, rebin_time, fdim=1, chunk_by=None, chunk_by_j # count of rows in output nrow_out = 0 # number of rows allocated in output - chunk_end_ts = chunk_end_time = None # current end-of-chunk boundary + chunk_end_ts = chunk_end_time = -1 # current end-of-chunk boundary # set chunk-by boundaries, if specified boundaries = np.zeros_like(time_col, bool) @@ -1059,9 +1060,7 @@ def define_chunk(self, chunk_time, rebin_time, fdim=1, chunk_by=None, chunk_by_j print(" generated {} row chunks based on time and DDID".format(len(chunklist)), file=log) # re-sort these row chunks into naturally increasing order (by first row of each chunk) - def _compare_chunks(a, b): - return cmp(a.rows[0], b.rows[0]) - chunklist.sort(cmp=_compare_chunks) + chunklist.sort(key=lambda x: x.rows[0]) if log.verbosity() > 2: print(" row chunks: {}".format(", ".join(["{} {}:{}".format(ch.tchunk, min(ch.rows0), max(ch.rows0)+1) for ch in chunklist])), file=log(3)) @@ -1168,7 +1167,7 @@ def define_flags(self, tile_list, flagopts): bitflags = flagging.Flagsets(self.ms) if auto_init: - if type(auto_init) is not str: + if not isinstance(auto_init, string_types): raise ValueError("Illegal --flags-auto-init setting -- a flagset name such as 'legacy' must be specified") if auto_init in bitflags.names(): print(" bitflag '{}' already exists, will not auto-fill".format(auto_init), file=log(0)) @@ -1197,7 +1196,7 @@ def define_flags(self, tile_list, flagopts): # --flags-apply specified as a bitmask, or a single string, or a single negated string, or a list of strings if type(apply_flags) is int: self._apply_bitflags = apply_flags - elif type(apply_flags) is not str: + elif not isinstance(apply_flags, string_types): raise ValueError("Illegal --flags-apply setting -- string or bitmask values expected") else: print(" BITFLAG column defines the following flagsets: {}".format( diff --git a/cubical/database/pickled_db.py b/cubical/database/pickled_db.py index 0aec467b..a437faa7 100644 --- a/cubical/database/pickled_db.py +++ b/cubical/database/pickled_db.py @@ -51,7 +51,7 @@ def _create(self, filename, metadata={}, backup=True, **kw): self.do_backup = backup self.metadata = OrderedDict(mode=self.MODE_FRAGMENTED, time=time.time(), **metadata) # we'll write to a temp file, and do a backup on successful closure - 
self._fobj = open(filename + ".tmp", 'w') + self._fobj = open(filename + ".tmp", 'wb') pickle.dump(self.metadata, self._fobj) self._fobj.flush() self._parameters = {} @@ -123,7 +123,7 @@ def _save_desc(self): for key in list(self._parameters.keys()): if not self._parameters[key]._populated: del self._parameters[key] - pickle.dump(self._parameters, open(self.filename + ".skel", 'w'), 2) + pickle.dump(self._parameters, open(self.filename + ".skel", 'wb'), 2) print("saved updated parameter skeletons to {}".format(self.filename + ".skel"), file=log(0)) def _backup_and_rename(self, backup): @@ -175,7 +175,7 @@ def save(self, filename=None, backup=True): class _Unpickler(Iterator): def __init__(self, filename): - self.fobj = open(filename) + self.fobj = open(filename, 'rb') self.metadata = pickle.load(self.fobj) if type(self.metadata) is not OrderedDict or not "mode" in self.metadata: raise IOError("{}: invalid metadata entry".format(filename)) @@ -187,6 +187,9 @@ def __next__(self): except EOFError: raise StopIteration + def next(self): + return self.__next__() + def _load(self, filename): """ Loads database from file. This will create arrays corresponding to the stored parameter @@ -231,7 +234,7 @@ def _load(self, filename): print(ModColor.Str("{} older than this code: will try to rebuild".format(descfile)), file=log(0)) else: try: - self._parameters = pickle.load(open(descfile, 'r')) + self._parameters = pickle.load(open(descfile, 'rb')) except: traceback.print_exc() print(ModColor.Str("error loading {}, will try to rebuild".format(descfile)), file=log(0)) diff --git a/cubical/flagging.py b/cubical/flagging.py index 40b8c942..a2980cff 100644 --- a/cubical/flagging.py +++ b/cubical/flagging.py @@ -9,6 +9,8 @@ # This is to keep matplotlib from falling over when no DISPLAY is set (which it otherwise does, # even if one is only trying to save figures to .png. from __future__ import print_function +from past.builtins import cmp +from functools import cmp_to_key import numpy as np import re @@ -88,7 +90,7 @@ def __init__ (self,ms): order = [] # form up "natural" order by comparing bitmasks bitwise_order = list(self.bits.keys()) - bitwise_order.sort(lambda a,b:cmp(self.bits[a],self.bits[b])) + bitwise_order.sort(key=cmp_to_key(lambda a,b:cmp(self.bits[a],self.bits[b]))) # if an order is specified, make sure it is actually valid, # and add any elements from bitwise_order that are not present self.order = [ fs for fs in order if fs in self.bits ] + \ diff --git a/cubical/main.py b/cubical/main.py index dfd3ad3d..43a72a98 100644 --- a/cubical/main.py +++ b/cubical/main.py @@ -14,6 +14,7 @@ # logging.getLogger('vext').setLevel(logging.WARNING) ## from __future__ import print_function +from six import string_types import pickle import os, os.path import sys @@ -219,7 +220,7 @@ def main(debugging=False): GD["out"]["name"] = basename # "GD" is a global defaults dict, containing options set up from parset + command line - pickle.dump(GD, open("cubical.last", "w")) + pickle.dump(GD, open("cubical.last", "wb")) # save parset with all settings. We refuse to clobber a parset with itself # (so e.g. 
"gocubical test.parset --Section-Option foo" does not overwrite test.parset) @@ -270,7 +271,7 @@ def main(debugging=False): solver_opts = GD["sol"] debug_opts = GD["debug"] sol_jones = solver_opts["jones"] - if type(sol_jones) is str: + if isinstance(sol_jones, string_types): sol_jones = set(sol_jones.split(',')) jones_opts = [GD[j.lower()] for j in sol_jones] # collect list of options from enabled Jones matrices @@ -382,7 +383,7 @@ def main(debugging=False): if type(subdirs) is int: subdirs = [subdirs] if subdirs: - if type(subdirs) is str: + if isinstance(subdirs, string_types): try: if ',' in subdirs: subdirs = list(map(int, subdirs.split(","))) @@ -431,7 +432,7 @@ def main(debugging=False): # set up chunking chunk_by = GD["data"]["chunk-by"] - if type(chunk_by) is str: + if isinstance(chunk_by, string_types): chunk_by = chunk_by.split(",") jump = float(GD["data"]["chunk-by-jump"]) diff --git a/cubical/tools/logger.py b/cubical/tools/logger.py index 757dcdf1..062fa202 100644 --- a/cubical/tools/logger.py +++ b/cubical/tools/logger.py @@ -7,6 +7,7 @@ # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet from __future__ import print_function +from six import string_types import logging, logging.handlers, os, re, sys, multiprocessing from . import ModColor @@ -300,7 +301,7 @@ def setGlobalVerbosity(verbosity): # ensure verbosity is turned into a list. if type(verbosity) is int: verbosity = [verbosity] - elif type(verbosity) is str: + elif isinstance(verbosity, string_types): verbosity = verbosity.split(",") elif not isinstance(verbosity, (list, tuple)): raise TypeError("can't parse verbosity specification of type '{}'".format(type(verbosity))) @@ -325,7 +326,7 @@ def setGlobalLogVerbosity(verbosity): # ensure verbosity is turned into a list. if type(verbosity) is int: verbosity = [verbosity] - elif type(verbosity) is str: + elif isinstance(verbosity, string_types): verbosity = verbosity.split(",") elif not isinstance(verbosity, (list, tuple)): raise TypeError("can't parse verbosity specification of type '{}'".format(type(verbosity))) @@ -347,7 +348,7 @@ def setGlobalLogVerbosity(verbosity): def setSilent(Lname): """Silences the specified sublogger(s)""" log.print(ModColor.Str("set silent: %s" % Lname, col="red")) - if type(Lname) is str: + if isinstance(Lname, string_types): getLogger(Lname).logger.setLevel(logging.CRITICAL) elif type(Lname) is list: for name in Lname: @@ -357,7 +358,7 @@ def setSilent(Lname): def setLoud(Lname): """Un-silences the specified sublogger(s)""" log.print(ModColor.Str("set loud: %s" % Lname, col="green")) - if type(Lname) is str: + if isinstance(Lname, string_types): getLogger(Lname).logger.setLevel(logging.DEBUG) elif type(Lname) is list: for name in Lname: diff --git a/cubical/tools/shared_dict.py b/cubical/tools/shared_dict.py index badec967..de3da635 100644 --- a/cubical/tools/shared_dict.py +++ b/cubical/tools/shared_dict.py @@ -79,7 +79,7 @@ def load_impl(self): class PickleProxy(ItemProxy): def load_impl(self): - return pickle.load(file(self.path)) + return pickle.load(open(self.path, 'rb')) # this maps "class codes" parsed out of item filenames to appropriate item proxies. 
See reload() below _proxy_class_map = dict(a=SharedArrayProxy, d=SubdictProxy, p=PickleProxy) @@ -264,7 +264,7 @@ def __setitem__(self, item, value): value = dict1 # all other types, just use pickle else: - pickle.dump(value, file(path+'p', "w"), 2) + pickle.dump(value, open(path+'p', "wb"), 2) dict.__setitem__(self, item, value) def addSubdict (self, item): From 8a421e4e376ab993c647462f806120ff9dc55b94 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Wed, 15 May 2019 15:42:28 +0200 Subject: [PATCH 06/25] improve compat with older python-casacore --- cubical/data_handler/ms_data_handler.py | 4 ++-- cubical/data_handler/ms_tile.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cubical/data_handler/ms_data_handler.py b/cubical/data_handler/ms_data_handler.py index 5275f3a8..a7a33b3b 100644 --- a/cubical/data_handler/ms_data_handler.py +++ b/cubical/data_handler/ms_data_handler.py @@ -759,7 +759,7 @@ def fetch(self, colname, first_row=0, nrows=-1, subset=None): Result of getcol(\*args, \*\*kwargs). """ - return (subset or self.data).getcol(colname, first_row, nrows) + return (subset or self.data).getcol(str(colname), first_row, nrows) def fetchslice(self, column, startrow=0, nrows=-1, subset=None): """ @@ -826,7 +826,7 @@ def putslice(self, column, value, startrow=0, nrows=-1, subset=None): # if no slicing, just use putcol to put the whole thing. This always works, # unless the MS is screwed up if self._ms_blc == None: - return subset.putcol(column, value, startrow, nrows) + return subset.putcol(str(column), value, startrow, nrows) if nrows<0: nrows = subset.nrows() diff --git a/cubical/data_handler/ms_tile.py b/cubical/data_handler/ms_tile.py index 6ca5732d..82c201a5 100644 --- a/cubical/data_handler/ms_tile.py +++ b/cubical/data_handler/ms_tile.py @@ -652,7 +652,7 @@ def load(self, load_model=True): wcol = wcol_cache.get(weight_col) if wcol is None: print("model {} weights {}: reading from {}{}".format(imod, iwcol, weight_col, mean_corr), file=log(0)) - wcol = table_subset.getcol(weight_col) + wcol = table_subset.getcol(str(weight_col)) # support two shapes of wcol: either same as data (a-la WEIGHT_SPECTRUM), or missing # a frequency axis (a-la WEIGHT) if wcol.ndim == 3: From 747e1c6d1d2e457946fde892852ec4bf55fafe08 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Wed, 15 May 2019 16:14:41 +0200 Subject: [PATCH 07/25] more old python-casascore compat code --- cubical/database/casa_db_adaptor.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cubical/database/casa_db_adaptor.py b/cubical/database/casa_db_adaptor.py index 0fa3e353..11dac2da 100644 --- a/cubical/database/casa_db_adaptor.py +++ b/cubical/database/casa_db_adaptor.py @@ -53,7 +53,7 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr os.rename(os.path.join(basedir, BLANK_TABLE_NAME), filename) antorder = [db.antnames.index(an) for an in solants] - with tbl("%s::ANTENNA" % filename, ack=False, readonly=False) as t: + with tbl("%s::ANTENNA" % str(filename), ack=False, readonly=False) as t: t.addrows(nrows=len(db.anttype)) t.putcol("OFFSET", db.antoffset[antorder]) t.putcol("POSITION", db.antpos[antorder]) @@ -67,7 +67,7 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr assert "field" in db.metadata, "Solver field not passed in metadata. 
This is a bug" assert type(db.metadata["field"]) is int, "Currently only supports single field" selfield = np.arange(len(db.fieldname)) == db.metadata["field"] - with tbl("%s::FIELD" % filename, ack=False, readonly=False) as t: + with tbl("%s::FIELD" % str(filename), ack=False, readonly=False) as t: t.addrows(nrows=field_ndir) t.putcol("DELAY_DIR", np.tile(db.fielddelaydirs[selfield], (field_ndir, 1))) t.putcol("PHASE_DIR", np.tile(db.fieldphasedirs[selfield], (field_ndir, 1))) @@ -78,7 +78,7 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr t.putcol("SOURCE_ID", np.tile(db.fieldsrcid[selfield], (field_ndir, 1)) + np.arange(field_ndir).T) t.putcol("TIME", np.tile(db.fieldtime[selfield], (field_ndir, 1))) - with tbl("%s::OBSERVATION" % filename, ack=False, readonly=False) as t: + with tbl("%s::OBSERVATION" % str(filename), ack=False, readonly=False) as t: t.addrows(nrows=len(db.obsobserver)) (len(db.obstimerange) != 0) and t.putcol("TIME_RANGE", db.obstimerange) (len(db.obslog) != 0) and t.putcol("LOG", db.obslog) @@ -89,7 +89,7 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr (len(db.obsreleasedate) != 0) and t.putcol("RELEASE_DATE", db.obsreleasedate) (len(db.obstelescopename) != 0) and t.putcol("TELESCOPE_NAME", db.obstelescopename) - with tbl("%s::SPECTRAL_WINDOW" % filename, ack=False, readonly=False) as t: + with tbl("%s::SPECTRAL_WINDOW" % str(filename), ack=False, readonly=False) as t: t.addrows(nrows=len(db.sel_ddids)) # Per DDID determine solution spacing in frequency for iddid, ddid in enumerate(db.sel_ddids): @@ -124,7 +124,7 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr t.putcell("NUM_CHAN", iddid, ddsolfreqs.size) t.putcell("TOTAL_BANDWIDTH", iddid, maxfreq - minfreq) - with tbl(filename, ack=False, readonly=False) as t: + with tbl(str(filename), ack=False, readonly=False) as t: t.putkeyword("ParType", "Complex" if is_complex else "Float") t.putkeyword("VisCal", viscal_label) @@ -241,7 +241,7 @@ def create_B_table(cls, db, gname, outname = "B", diag=True): field_ndir=ndir, viscal_label="B Jones" if diag else "D Jones") - with tbl(db.filename + ".%s.casa" % outname, ack=False, readonly=False) as t: + with tbl(str(db.filename) + ".%s.casa" % outname, ack=False, readonly=False) as t: t.addrows(nrows=nrow) for iddid, ddid in enumerate(db.sel_ddids): From 84f7710df9783ca05f361eb21d044044bbf8c665 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Thu, 16 May 2019 14:51:21 +0200 Subject: [PATCH 08/25] finishing touch --- cubical/statistics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cubical/statistics.py b/cubical/statistics.py index 3a6383be..b6e74c3d 100644 --- a/cubical/statistics.py +++ b/cubical/statistics.py @@ -98,7 +98,7 @@ def save(self, filename): """ pickle.dump( - (self.chanant, self.timeant, self.timechan, self.chunk), open(filename, 'w'), 2) + (self.chanant, self.timeant, self.timechan, self.chunk), open(filename, 'wb'), 2) def load(self, fileobj): """ From 1a995551abb60a010e27cde6051c5cb5a3410fc5 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Thu, 16 May 2019 16:04:11 +0200 Subject: [PATCH 09/25] add ben feedback --- cubical/data_handler/MBTiggerSim.py | 9 +-------- cubical/data_handler/ms_data_handler.py | 3 ++- cubical/data_handler/ms_tile.py | 3 +-- cubical/database/casa_db_adaptor.py | 2 +- cubical/database/parameter.py | 1 + cubical/database/pickled_db.py | 3 ++- cubical/flagging.py | 1 + cubical/machines/interval_gain_machine.py 
| 1 + cubical/machines/jones_chain_machine.py | 1 + cubical/machines/jones_chain_robust_machine.py | 1 + cubical/machines/parallactic_machine.py | 1 + cubical/madmax/flagger.py | 3 ++- cubical/madmax/plots.py | 1 + cubical/main.py | 3 ++- cubical/param_db.py | 1 - cubical/plots/__init__.py | 1 + cubical/plots/ifrgains.py | 1 + cubical/plots/stats.py | 1 + cubical/statistics.py | 3 ++- cubical/tools/ClassPrint.py | 1 + cubical/tools/NpShared.py | 1 + cubical/workers.py | 1 + setup.py | 1 + 23 files changed, 27 insertions(+), 17 deletions(-) diff --git a/cubical/data_handler/MBTiggerSim.py b/cubical/data_handler/MBTiggerSim.py index 2fbb7d8e..afed7b77 100644 --- a/cubical/data_handler/MBTiggerSim.py +++ b/cubical/data_handler/MBTiggerSim.py @@ -5,16 +5,9 @@ """ Handles the interface between measurement sets, CubiCal and Montblanc. """ - -import collections -import functools -import types - +from builtins import range import numpy as np -import pyrap.tables as pt - import montblanc -import logging import montblanc.util as mbu import montblanc.impl.rime.tensorflow.ms.ms_manager as MS diff --git a/cubical/data_handler/ms_data_handler.py b/cubical/data_handler/ms_data_handler.py index a7a33b3b..6a3377fc 100644 --- a/cubical/data_handler/ms_data_handler.py +++ b/cubical/data_handler/ms_data_handler.py @@ -3,11 +3,12 @@ # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details from __future__ import print_function +from builtins import range from six import string_types import numpy as np from collections import OrderedDict import pyrap.tables as pt -import pickle +from future.moves import pickle import re import traceback import math diff --git a/cubical/data_handler/ms_tile.py b/cubical/data_handler/ms_tile.py index 82c201a5..349bf560 100644 --- a/cubical/data_handler/ms_tile.py +++ b/cubical/data_handler/ms_tile.py @@ -3,10 +3,9 @@ # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details from __future__ import print_function +from builtins import range import numpy as np from collections import OrderedDict -import traceback -import itertools from cubical.tools import shared_dict from cubical.flagging import FL diff --git a/cubical/database/casa_db_adaptor.py b/cubical/database/casa_db_adaptor.py index 11dac2da..ca757162 100644 --- a/cubical/database/casa_db_adaptor.py +++ b/cubical/database/casa_db_adaptor.py @@ -2,7 +2,7 @@ # (c) 2017 Rhodes University & Jonathan S. Kenyon # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details - +from builtins import range from cubical.database.pickled_db import PickledDatabase from cubical.data_handler.ms_data_handler import MSDataHandler from cubical.tools import logger diff --git a/cubical/database/parameter.py b/cubical/database/parameter.py index 1370db68..28db6cb7 100644 --- a/cubical/database/parameter.py +++ b/cubical/database/parameter.py @@ -6,6 +6,7 @@ Handles parameter databases which can contain solutions and other relevant values. """ from __future__ import print_function +from builtins import range import numpy as np from numpy.ma import masked_array from cubical.tools import logger diff --git a/cubical/database/pickled_db.py b/cubical/database/pickled_db.py index a437faa7..4f3b338c 100644 --- a/cubical/database/pickled_db.py +++ b/cubical/database/pickled_db.py @@ -6,7 +6,8 @@ Handles parameter databases which can contain solutions and other relevant values. 
""" from __future__ import print_function -import pickle, os, os.path +from future.moves import pickle +import os, os.path import numpy as np import traceback from cubical.tools import logger, ModColor diff --git a/cubical/flagging.py b/cubical/flagging.py index a2980cff..74a75a6e 100644 --- a/cubical/flagging.py +++ b/cubical/flagging.py @@ -9,6 +9,7 @@ # This is to keep matplotlib from falling over when no DISPLAY is set (which it otherwise does, # even if one is only trying to save figures to .png. from __future__ import print_function +from builtins import range from past.builtins import cmp from functools import cmp_to_key import numpy as np diff --git a/cubical/machines/interval_gain_machine.py b/cubical/machines/interval_gain_machine.py index 1563824e..23f29912 100644 --- a/cubical/machines/interval_gain_machine.py +++ b/cubical/machines/interval_gain_machine.py @@ -3,6 +3,7 @@ # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details from __future__ import print_function +from builtins import range import numpy as np from cubical.flagging import FL from cubical.machines.abstract_machine import MasterMachine diff --git a/cubical/machines/jones_chain_machine.py b/cubical/machines/jones_chain_machine.py index 1a50161a..a53a5dce 100644 --- a/cubical/machines/jones_chain_machine.py +++ b/cubical/machines/jones_chain_machine.py @@ -3,6 +3,7 @@ # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details from __future__ import print_function +from builtins import range from cubical.machines.abstract_machine import MasterMachine from cubical.machines.complex_2x2_machine import Complex2x2Gains from cubical.machines.complex_W_2x2_machine import ComplexW2x2Gains diff --git a/cubical/machines/jones_chain_robust_machine.py b/cubical/machines/jones_chain_robust_machine.py index 73b4cd5e..a0b98a3f 100644 --- a/cubical/machines/jones_chain_robust_machine.py +++ b/cubical/machines/jones_chain_robust_machine.py @@ -3,6 +3,7 @@ # http://github.com/ratt-ru/CubiCal # This code is distributed under the terms of GPLv2, see LICENSE.md for details from __future__ import print_function +from builtins import range from cubical.machines.abstract_machine import MasterMachine from cubical.machines.complex_W_2x2_machine import ComplexW2x2Gains import numpy as np diff --git a/cubical/machines/parallactic_machine.py b/cubical/machines/parallactic_machine.py index 8e0103d9..fb529d7e 100644 --- a/cubical/machines/parallactic_machine.py +++ b/cubical/machines/parallactic_machine.py @@ -1,5 +1,6 @@ from __future__ import print_function +from builtins import range import pyrap.quanta as pq import pyrap.measures pm = pyrap.measures.measures() diff --git a/cubical/madmax/flagger.py b/cubical/madmax/flagger.py index 3a8f84b1..73ffb83d 100644 --- a/cubical/madmax/flagger.py +++ b/cubical/madmax/flagger.py @@ -1,4 +1,5 @@ from __future__ import print_function +from builtins import range import numpy as np import os, os.path import traceback @@ -315,7 +316,7 @@ def beyond_thunderdome(self, resid_arr, data_arr, model_arr, flags_arr, threshol filename = self.get_plot_filename('mads') print("{}: saving MAD distribution plot to {}".format(self.chunk_label,filename), file=log(1)) figure.savefig(filename, dpi=300) - import pickle + from future.moves import pickle pickle_file = filename+".cp" pickle.dump((mad, medmad, med_thr, self.metadata, max_label), open(pickle_file, "w"), 2) print("{}: pickling MAD distribution to 
{}".format(self.chunk_label, pickle_file), file=log(1)) diff --git a/cubical/madmax/plots.py b/cubical/madmax/plots.py index 3b16b6c4..94e3ca40 100644 --- a/cubical/madmax/plots.py +++ b/cubical/madmax/plots.py @@ -1,4 +1,5 @@ from __future__ import print_function +from builtins import range import numpy as np import traceback diff --git a/cubical/main.py b/cubical/main.py index 43a72a98..f2d37199 100644 --- a/cubical/main.py +++ b/cubical/main.py @@ -14,8 +14,9 @@ # logging.getLogger('vext').setLevel(logging.WARNING) ## from __future__ import print_function +from builtins import range from six import string_types -import pickle +from future.moves import pickle import os, os.path import sys import warnings diff --git a/cubical/param_db.py b/cubical/param_db.py index 0da5bb15..96ccf4b8 100644 --- a/cubical/param_db.py +++ b/cubical/param_db.py @@ -5,7 +5,6 @@ """ Handles parameter databases which can contain solutions and other relevant values. """ - import numpy as np from cubical.tools import logger log = logger.getLogger("param_db") diff --git a/cubical/plots/__init__.py b/cubical/plots/__init__.py index a60afda4..f1838254 100644 --- a/cubical/plots/__init__.py +++ b/cubical/plots/__init__.py @@ -1,4 +1,5 @@ import numpy as np +from builtins import range # these control the layout of saved plots DPI = 150. # resolution: determines size of text relative to plots diff --git a/cubical/plots/ifrgains.py b/cubical/plots/ifrgains.py index 49c50b3f..14936831 100644 --- a/cubical/plots/ifrgains.py +++ b/cubical/plots/ifrgains.py @@ -1,4 +1,5 @@ from __future__ import print_function +from builtins import range import math,cmath import numpy as np import numpy.ma as ma diff --git a/cubical/plots/stats.py b/cubical/plots/stats.py index 4b1fdf29..615db017 100644 --- a/cubical/plots/stats.py +++ b/cubical/plots/stats.py @@ -6,6 +6,7 @@ Creates summary plots using the solver stats. """ from __future__ import print_function +from builtins import range import numpy as np from cubical.tools import logger log = logger.getLogger("plots") diff --git a/cubical/statistics.py b/cubical/statistics.py index b6e74c3d..db8569d8 100644 --- a/cubical/statistics.py +++ b/cubical/statistics.py @@ -6,9 +6,10 @@ Handles solver statistics. """ from __future__ import print_function +from builtins import range import math import numpy as np -import pickle +from future.moves import pickle from cubical.tools import logger from cubical.tools import ModColor diff --git a/cubical/tools/ClassPrint.py b/cubical/tools/ClassPrint.py index d10b85e7..dc50e471 100644 --- a/cubical/tools/ClassPrint.py +++ b/cubical/tools/ClassPrint.py @@ -6,6 +6,7 @@ # This module has been adapted from the DDFacet package, # (c) Cyril Tasse et al., see http://github.com/saopicc/DDFacet from __future__ import print_function +from builtins import range import os import sys from . import ModColor diff --git a/cubical/tools/NpShared.py b/cubical/tools/NpShared.py index e7159eab..d9090568 100644 --- a/cubical/tools/NpShared.py +++ b/cubical/tools/NpShared.py @@ -9,6 +9,7 @@ #import sharedarray.SharedArray as SharedArray from __future__ import print_function +from builtins import range import SharedArray from . import ModColor from . 
import logger diff --git a/cubical/workers.py b/cubical/workers.py index 4bdb1bac..9b1f82a1 100644 --- a/cubical/workers.py +++ b/cubical/workers.py @@ -1,4 +1,5 @@ from __future__ import print_function +from builtins import range import multiprocessing, os, sys, traceback import concurrent.futures as cf import re diff --git a/setup.py b/setup.py index ea8b8972..45d563fe 100644 --- a/setup.py +++ b/setup.py @@ -134,6 +134,7 @@ def run(self): 'scipy'] else: requirements = ['future', + 'builtins', 'numpy', 'futures', 'python-casacore>=2.1.2', From 4da02934c51efc8a9a37311637bf69a492881d96 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Mon, 20 May 2019 16:44:16 +0200 Subject: [PATCH 10/25] this should not be here --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 45d563fe..ea8b8972 100644 --- a/setup.py +++ b/setup.py @@ -134,7 +134,6 @@ def run(self): 'scipy'] else: requirements = ['future', - 'builtins', 'numpy', 'futures', 'python-casacore>=2.1.2', From e4c6fdbf540d8557b7b4e2d48f77a780269e37a8 Mon Sep 17 00:00:00 2001 From: Gijs Molenaar Date: Tue, 21 May 2019 13:36:20 +0200 Subject: [PATCH 11/25] fix py2 compat --- cubical/database/casa_db_adaptor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cubical/database/casa_db_adaptor.py b/cubical/database/casa_db_adaptor.py index ca757162..51b90712 100644 --- a/cubical/database/casa_db_adaptor.py +++ b/cubical/database/casa_db_adaptor.py @@ -167,7 +167,7 @@ def create_G_table(cls, db, gname, outname = "Gphase"): field_ndir=ndir, viscal_label="G Jones") - with tbl(db.filename + ".%s.casa" % outname, ack=False, readonly=False) as t: + with tbl(str(db.filename + ".%s.casa" % outname), ack=False, readonly=False) as t: t.addrows(nrows=nrow) for iddid, ddid in enumerate(db.sel_ddids): spwid = db.ddid_spw_map[ddid] @@ -321,7 +321,7 @@ def create_K_table(cls, db, gname, outname = "K"): is_complex=False, viscal_label="K Jones") - with tbl(db.filename + ".%s.casa" % outname, ack=False, readonly=False) as t: + with tbl(str(db.filename + ".%s.casa" % outname), ack=False, readonly=False) as t: t.addrows(nrows=nrow) for iddid, ddid in enumerate(db.sel_ddids): spwid = db.ddid_spw_map[ddid] From 483662a4295a383529d391b4b71aa022d841d717 Mon Sep 17 00:00:00 2001 From: Benna Hugo Date: Wed, 22 May 2019 14:43:01 +0200 Subject: [PATCH 12/25] Update with Jenkins.sh file --- Dockerfile | 22 ++++++++++++---------- Jenkinsfile.sh | 20 ++++++++++++++++++++ 2 files changed, 32 insertions(+), 10 deletions(-) create mode 100644 Jenkinsfile.sh diff --git a/Dockerfile b/Dockerfile index c852bba3..ad3440d4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,17 @@ FROM kernsuite/base:3 MAINTAINER Ben Hugo "bhugo@ska.ac.za" +ENV DEB_DEPENDENCIES casacore-dev \ + casacore-data \ + build-essential \ + python-pip \ + libboost-all-dev \ + wcslib-dev \ + libcfitsio3-dev +RUN apt-get update +RUN apt-get install -y $DEB_DEPENDENCIES +RUN pip install -U pip wheel setuptools + RUN mkdir /src RUN mkdir /src/cubical ADD cubical /src/cubical/cubical @@ -20,16 +31,7 @@ ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt ADD setup.py /src/cubical/setup.py WORKDIR /src/cubical -ENV DEB_DEPENDENCIES casacore-dev \ - casacore-data \ - build-essential \ - python-pip \ - libboost-all-dev \ - wcslib-dev \ - libcfitsio3-dev -RUN apt-get update -RUN apt-get install -y $DEB_DEPENDENCIES -RUN pip install -U pip wheel setuptools + RUN pip install -r requirements.txt RUN python setup.py gocythonize RUN pip install -U . 
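The compat imports that keep reappearing in the diffs above (from builtins import range, from __future__ import print_function and division) all come from one place on Python 2: the future package. That is presumably why future stays in install_requires while the spurious builtins entry was removed again in the follow-up commit; the builtins module is supplied by future on Python 2 and is part of the language on Python 3. A minimal sketch of what the shims buy, with toy values standing in for the kernel's nt/t_int parameters:

    from __future__ import print_function, division
    from builtins import range   # supplied by `future` on Python 2, the built-in on Python 3

    nt, t_int = 60, 7
    # number of solution intervals: "//" stays integral now that "/" is true division
    nt1 = nt // t_int + (1 if nt % t_int else 0)
    rows = [t // t_int for t in range(nt)]   # py3-style lazy range on both interpreters
    assert nt1 == 9 and rows[-1] == 8

This is the same reasoning behind the nt/t_int -> nt//t_int and xrange -> range edits in test/benchmark/kernel_timings.py earlier in the series.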
diff --git a/Jenkinsfile.sh b/Jenkinsfile.sh new file mode 100644 index 00000000..3f29968a --- /dev/null +++ b/Jenkinsfile.sh @@ -0,0 +1,20 @@ +WORKSPACE_ROOT="$WORKSPACE/$BUILD_NUMBER" +TEST_OUTPUT_DIR="$WORKSPACE_ROOT/test-output" +TEST_DATA_DIR="$WORKSPACE/../../../test-data" +mkdir $TEST_OUTPUT_DIR + +# build and testrun +docker build -t cubical:${BUILD_NUMBER} ${WORKSPACE_ROOT}/projects/Cubical/ +docker run --rm cubical:${BUILD_NUMBER} + +#run tests +docker run --rm -m 100g --cap-add sys_ptrace \ + --memory-swap=-1 \ + --shm-size=150g \ + --rm=true \ + --name=cubical$BUILD_NUMBER \ + -v ${TEST_OUTPUT_DIR}:/workspace \ + -v ${TEST_OUTPUT_DIR}:/root/tmp \ + --entrypoint /bin/bash \ + cubical:${BUILD_NUMBER} \ + -c "cd /src/cubical && apt-get install -y git && pip install -r requirements.test.txt && nosetests --with-xunit --xunit-file /workspace/nosetests.xml test" \ No newline at end of file From 7c7bc554df5549366f498b72c27ede1eac6334f4 Mon Sep 17 00:00:00 2001 From: Benna Hugo Date: Wed, 22 May 2019 17:12:38 +0200 Subject: [PATCH 13/25] Add py2 and py3 tests for 16.04 and 18.04 --- Dockerfile => .jenkins/1604.py2.docker | 0 .jenkins/1804.py2.docker | 40 ++++++++++++++++++++++++++ .jenkins/1804.py3.docker | 40 ++++++++++++++++++++++++++ Jenkinsfile.sh | 32 ++++++++++++--------- setup.py | 18 +++++++++++- 5 files changed, 116 insertions(+), 14 deletions(-) rename Dockerfile => .jenkins/1604.py2.docker (100%) create mode 100644 .jenkins/1804.py2.docker create mode 100644 .jenkins/1804.py3.docker diff --git a/Dockerfile b/.jenkins/1604.py2.docker similarity index 100% rename from Dockerfile rename to .jenkins/1604.py2.docker diff --git a/.jenkins/1804.py2.docker b/.jenkins/1804.py2.docker new file mode 100644 index 00000000..01223b84 --- /dev/null +++ b/.jenkins/1804.py2.docker @@ -0,0 +1,40 @@ +FROM kernsuite/base:5 +MAINTAINER Ben Hugo "bhugo@ska.ac.za" + +ENV DEB_DEPENDENCIES casacore-dev \ + casacore-data \ + build-essential \ + python-pip \ + libboost-all-dev \ + wcslib-dev \ + libcfitsio-dev +RUN apt-get update +RUN apt-get install -y $DEB_DEPENDENCIES +RUN pip install -U pip wheel setuptools + +RUN mkdir /src +RUN mkdir /src/cubical +ADD cubical /src/cubical/cubical +ADD docs /src/cubical/docs +ADD examples /src/cubical/examples +ADD test /src/cubical/test +ADD .gitattributes /src/cubical/.gitattributes +ADD .gitignore /src/cubical/.gitignore +ADD .git /src/cubical/.git +ADD HEADER /src/cubical/HEADER +ADD LICENSE.md /src/cubical/LICENSE.md +ADD MANIFEST.in /src/cubical/MANIFEST.in +ADD README.md /src/cubical/README.md +ADD requirements.txt /src/cubical/requirements.txt +ADD requirements.test.txt /src/cubical/requirements.test.txt +ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt +ADD setup.py /src/cubical/setup.py + +WORKDIR /src/cubical + +RUN pip install -r requirements.txt +RUN python2.7 setup.py gocythonize +RUN pip install -U . 
+
+ENTRYPOINT ["gocubical"]
+CMD ["--help"]
diff --git a/.jenkins/1804.py3.docker b/.jenkins/1804.py3.docker
new file mode 100644
index 00000000..d9d71b5d
--- /dev/null
+++ b/.jenkins/1804.py3.docker
@@ -0,0 +1,40 @@
+FROM kernsuite/base:5
+MAINTAINER Ben Hugo "bhugo@ska.ac.za"
+
+ENV DEB_DEPENDENCIES casacore-dev \
+    casacore-data \
+    build-essential \
+    python3.7 \
+    python3-pip \
+    libboost-all-dev \
+    wcslib-dev \
+    libcfitsio-dev
+RUN apt-get update
+RUN apt-get install -y $DEB_DEPENDENCIES
+RUN pip3 install -U pip wheel setuptools
+
+RUN mkdir /src
+RUN mkdir /src/cubical
+ADD cubical /src/cubical/cubical
+ADD docs /src/cubical/docs
+ADD examples /src/cubical/examples
+ADD test /src/cubical/test
+ADD .gitattributes /src/cubical/.gitattributes
+ADD .gitignore /src/cubical/.gitignore
+ADD .git /src/cubical/.git
+ADD HEADER /src/cubical/HEADER
+ADD LICENSE.md /src/cubical/LICENSE.md
+ADD MANIFEST.in /src/cubical/MANIFEST.in
+ADD README.md /src/cubical/README.md
+ADD requirements.txt /src/cubical/requirements.txt
+ADD requirements.test.txt /src/cubical/requirements.test.txt
+ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt
+ADD setup.py /src/cubical/setup.py
+
+WORKDIR /src/cubical
+RUN pip3 install -r requirements.txt
+RUN python3 setup.py gocythonize
+RUN pip3 install -U .
+
+ENTRYPOINT ["gocubical"]
+CMD ["--help"]
diff --git a/Jenkinsfile.sh b/Jenkinsfile.sh
index 3f29968a..a62ee133 100644
--- a/Jenkinsfile.sh
+++ b/Jenkinsfile.sh
@@ -4,17 +4,23 @@ TEST_DATA_DIR="$WORKSPACE/../../../test-data"
 mkdir $TEST_OUTPUT_DIR
 
 # build and testrun
-docker build -t cubical:${BUILD_NUMBER} ${WORKSPACE_ROOT}/projects/Cubical/
-docker run --rm cubical:${BUILD_NUMBER}
-
+docker build -f ${WORKSPACE_ROOT}/projects/Cubical/.jenkins/1604.py2.docker -t cubical.1604.py2:${BUILD_NUMBER} ${WORKSPACE_ROOT}/projects/Cubical/
+docker run --rm cubical.1604.py2:${BUILD_NUMBER}
+docker build -f ${WORKSPACE_ROOT}/projects/Cubical/.jenkins/1804.py2.docker -t cubical.1804.py2:${BUILD_NUMBER} ${WORKSPACE_ROOT}/projects/Cubical/
+docker run --rm cubical.1804.py2:${BUILD_NUMBER}
+docker build -f ${WORKSPACE_ROOT}/projects/Cubical/.jenkins/1804.py3.docker -t cubical.1804.py3:${BUILD_NUMBER} ${WORKSPACE_ROOT}/projects/Cubical/
+docker run --rm cubical.1804.py3:${BUILD_NUMBER}
 #run tests
-docker run --rm -m 100g --cap-add sys_ptrace \
-    --memory-swap=-1 \
-    --shm-size=150g \
-    --rm=true \
-    --name=cubical$BUILD_NUMBER \
-    -v ${TEST_OUTPUT_DIR}:/workspace \
-    -v ${TEST_OUTPUT_DIR}:/root/tmp \
-    --entrypoint /bin/bash \
-    cubical:${BUILD_NUMBER} \
-    -c "cd /src/cubical && apt-get install -y git && pip install -r requirements.test.txt && nosetests --with-xunit --xunit-file /workspace/nosetests.xml test"
\ No newline at end of file
+for img in 1604.py2 1804.py2 1804.py3;
+do
+    docker run --rm -m 100g --cap-add sys_ptrace \
+        --memory-swap=-1 \
+        --shm-size=150g \
+        --rm=true \
+        --name=cubical$BUILD_NUMBER \
+        -v ${TEST_OUTPUT_DIR}:/workspace \
+        -v ${TEST_OUTPUT_DIR}:/root/tmp \
+        --entrypoint /bin/bash \
+        cubical.${img}:${BUILD_NUMBER} \
+        -c "cd /src/cubical && apt-get install -y git && pip install -r requirements.test.txt && nosetests --with-xunit --xunit-file /workspace/nosetests.xml test"
+done
\ No newline at end of file
diff --git a/setup.py b/setup.py
index ea8b8972..c7ce93d8 100644
--- a/setup.py
+++ b/setup.py
@@ -56,6 +56,22 @@
 except ImportError:
     cythonize = None
 
+preinstall_dependencies = ["'six >= 1.12.0'"]
+try:
+    import six
+except ImportError as e:
+    import subprocess
+    import pip
+
+    subprocess.call(["cd .. && pip install %s" %
+                     (" ".join(preinstall_dependencies)), ""], shell=True)
+    subprocess.call(["cd .. && pip3 install %s" %
+                     (" ".join(preinstall_dependencies)), ""], shell=True)
+    try:
+        import six
+    except ImportError as e:
+        raise ImportError("Six autoinstall failed. Please install Python 2.x compatibility package six before running Cubical install")
+
+
 cmpl_args = ['-ffast-math',
              '-O2',
              '-march=native',
@@ -107,7 +123,7 @@ def run(self):
                           extra_link_args=link_args_omp if omp else link_args,
                           language="c++" if cpp else "c"))
 
-        cythonize(extensions, compiler_directives={'binding': True}, annotate=True, force=self.force)
+        cythonize(extensions, compiler_directives={'binding': True, 'language_level' : "3" if six.PY3 else "2"}, annotate=True, force=self.force)
 
 extensions = []
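The `language_level` directive added above is the key py3 enabler in this patch: Cython releases of that era defaulted to Python 2 parsing semantics, so without the directive the kernels would keep Python 2 division and print behaviour even under a Python 3 interpreter. A minimal standalone sketch of the same pattern (the kernel path is illustrative only, not part of this patch), assuming Cython and six are installed:

    # sketch: cythonize a kernel with a language level matched to the interpreter
    import six
    from Cython.Build import cythonize

    cythonize(
        ["cubical/kernels/cyfull_experimental.pyx"],  # any of the .pyx kernels
        compiler_directives={
            "binding": True,
            # under language_level "3", `a / b` is true division even between
            # C integers, so index arithmetic in the sources must use `//`
            "language_level": "3" if six.PY3 else "2",
        },
    )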
From ebd45e0a4a99a97500da7f4d0cd23fb0835c79af Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Thu, 23 May 2019 11:36:47 +0200
Subject: [PATCH 14/25] fix residual python3 build issues

---
 Jenkinsfile.sh                          |  1 +
 cubical/kernels/cyfull_experimental.pyx | 88 ++++++++++++-------------
 2 files changed, 45 insertions(+), 44 deletions(-)

diff --git a/Jenkinsfile.sh b/Jenkinsfile.sh
index a62ee133..efd22e11 100644
--- a/Jenkinsfile.sh
+++ b/Jenkinsfile.sh
@@ -10,6 +10,7 @@ docker build -f ${WORKSPACE_ROOT}/projects/Cubical/.jenkins/1804.py2.docker -t cubical.1804.py2:${BUILD_NUMBER} ${WORKSPACE_ROOT}/projects/Cubical/
 docker run --rm cubical.1804.py2:${BUILD_NUMBER}
 docker build -f ${WORKSPACE_ROOT}/projects/Cubical/.jenkins/1804.py3.docker -t cubical.1804.py3:${BUILD_NUMBER} ${WORKSPACE_ROOT}/projects/Cubical/
 docker run --rm cubical.1804.py3:${BUILD_NUMBER}
+
 #run tests
 for img in 1604.py2 1804.py2 1804.py3;
 do
diff --git a/cubical/kernels/cyfull_experimental.pyx b/cubical/kernels/cyfull_experimental.pyx
index 79046c20..21142abe 100644
--- a/cubical/kernels/cyfull_experimental.pyx
+++ b/cubical/kernels/cyfull_experimental.pyx
@@ -137,9 +137,9 @@ def cycompute_residual(complex3264 [:,:,:,:,:,:,:,:] m,
         for ab in xrange(n_ant):
             for i in xrange(n_mod):
                 for t in xrange(n_tim):
-                    rr = t/t_int
+                    rr = t//t_int
                     for f in xrange(n_fre):
-                        rc = f/f_int
+                        rc = f//f_int
                         for d in xrange(n_dir):
                             r[i,t,f,aa,ab,0,0] = r[i,t,f,aa,ab,0,0] - ( g[d,rr,rc,aa,0,0]*m[d,i,t,f,aa,ab,0,0]*gh[d,rr,rc,ab,0,0] + \
@@ -211,9 +211,9 @@ def cycompute_residual_nomp(complex3264 [:,:,:,:,:,:,:,:] m,
         for ab in xrange(n_ant):
             for i in xrange(n_mod):
                 for t in xrange(n_tim):
-                    rr = t/t_int
+                    rr = t//t_int
                     for f in xrange(n_fre):
-                        rc = f/f_int
+                        rc = f//f_int
                         for d in xrange(n_dir):
                             r[i,t,f,aa,ab,0,0] = r[i,t,f,aa,ab,0,0] - ( g[d,rr,rc,aa,0,0]*m[d,i,t,f,aa,ab,0,0]*gh[d,rr,rc,ab,0,0] + \
@@ -297,9 +297,9 @@ def cycompute_residual_nomp_conj1(complex3264 [:,:,:,:,:,:,:,:] m,
         for ab in xrange(aa+1,n_ant):
             for i in xrange(n_mod):
                 for t in xrange(n_tim):
-                    rr = t/t_int
+                    rr = t//t_int
                     for f in xrange(n_fre):
-                        rc = f/f_int
+                        rc = f//f_int
                         for d in xrange(n_dir):
                             r[i,t,f,aa,ab,0,0] = r[i,t,f,aa,ab,0,0] - ( g[d,rr,rc,aa,0,0]*m[d,i,t,f,aa,ab,0,0]*gh[d,rr,rc,ab,0,0] + \
@@ -373,9 +373,9 @@ def cycompute_residual_nomp_conj3(complex3264 [:,:,:,:,:,:,:,:] m,
         for ab in xrange(aa+1,n_ant):
             for i in xrange(n_mod):
                 for t in xrange(n_tim):
-                    rr = t/t_int
+                    rr = t//t_int
                     for f in xrange(n_fre):
-                        rc = f/f_int
+                        rc = f//f_int
                         for d in xrange(n_dir):
                             subtract_mat_product(&r[i,t,f,aa,ab,0,0], &g[d,rr,rc,aa,0,0], &m[d,i,t,f,aa,ab,0,0], &gh[d,rr,rc,ab,0,0])
                         mat_conjugate(&r[i,t,f,ab,aa,0,0], &r[i,t,f,aa,ab,0,0])
@@ -426,9 +426,9 @@ def cycompute_residual_nomp_conj2(complex3264 [:,:,:,:,:,:,:,:] m,
         for ab in xrange(n_ant):
             for i in xrange(n_mod):
                 for t in xrange(n_tim):
-                    rr = t/t_int
+                    rr = t//t_int
                     for f in xrange(n_fre):
-                        rc = f/f_int
+                        rc = f//f_int
                     if aa
Date: Fri, 24 May 2019 19:25:11 +0200
Subject: [PATCH 15/25] Fix residual issues with montblanc and py3 cython kernels

---
 .jenkins/1604.py2.docker       |  4 +++-
 .jenkins/1804.py2.docker       |  2 ++
 .jenkins/1804.py3.docker       | 11 ++++++-----
 Jenkinsfile.sh                 |  2 +-
 cubical/kernels/cygenerics.pyx |  2 +-
 requirements3.txt              |  3 +++
 setup.py                       |  2 +-
 7 files changed, 17 insertions(+), 9 deletions(-)
 create mode 100644 requirements3.txt

diff --git a/.jenkins/1604.py2.docker b/.jenkins/1604.py2.docker
index ad3440d4..28b22fac 100644
--- a/.jenkins/1604.py2.docker
+++ b/.jenkins/1604.py2.docker
@@ -31,8 +31,10 @@ ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt
 ADD setup.py /src/cubical/setup.py
 
 WORKDIR /src/cubical
-
+RUN pip install -U pip setuptools wheel
 RUN pip install -r requirements.txt
+RUN apt-get install -y git
+RUN pip install -r requirements.test.txt
 RUN python setup.py gocythonize
 RUN pip install -U .
 
diff --git a/.jenkins/1804.py2.docker b/.jenkins/1804.py2.docker
index 01223b84..bf98dc88 100644
--- a/.jenkins/1804.py2.docker
+++ b/.jenkins/1804.py2.docker
@@ -33,6 +33,8 @@ ADD setup.py /src/cubical/setup.py
 WORKDIR /src/cubical
 
 RUN pip install -r requirements.txt
+RUN apt-get install -y git
+RUN pip install -r requirements.test.txt
 RUN python2.7 setup.py gocythonize
 RUN pip install -U .
 
diff --git a/.jenkins/1804.py3.docker b/.jenkins/1804.py3.docker
index d9d71b5d..ce398bee 100644
--- a/.jenkins/1804.py3.docker
+++ b/.jenkins/1804.py3.docker
@@ -4,7 +4,6 @@ MAINTAINER Ben Hugo "bhugo@ska.ac.za"
 ENV DEB_DEPENDENCIES casacore-dev \
     casacore-data \
     build-essential \
-    python3.7 \
     python3-pip \
     libboost-all-dev \
     wcslib-dev \
@@ -26,15 +25,17 @@ ADD HEADER /src/cubical/HEADER
 ADD LICENSE.md /src/cubical/LICENSE.md
 ADD MANIFEST.in /src/cubical/MANIFEST.in
 ADD README.md /src/cubical/README.md
-ADD requirements.txt /src/cubical/requirements.txt
+ADD requirements3.txt /src/cubical/requirements3.txt
 ADD requirements.test.txt /src/cubical/requirements.test.txt
 ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt
 ADD setup.py /src/cubical/setup.py
 
 WORKDIR /src/cubical
-RUN pip3 install -r requirements.txt
-RUN python3 setup.py gocythonize
-RUN pip3 install -U .
+RUN python3.6 -m pip install -r requirements3.txt
+RUN apt-get install -y git
+RUN python3.6 -m pip install -r requirements.test.txt
+RUN python3.6 setup.py gocythonize
+RUN python3.6 -m pip install -U .
 
 ENTRYPOINT ["gocubical"]
 CMD ["--help"]
diff --git a/Jenkinsfile.sh b/Jenkinsfile.sh
index efd22e11..6f62739b 100644
--- a/Jenkinsfile.sh
+++ b/Jenkinsfile.sh
@@ -23,5 +23,5 @@ do
         -v ${TEST_OUTPUT_DIR}:/root/tmp \
         --entrypoint /bin/bash \
         cubical.${img}:${BUILD_NUMBER} \
-        -c "cd /src/cubical && apt-get install -y git && pip install -r requirements.test.txt && nosetests --with-xunit --xunit-file /workspace/nosetests.xml test"
+        -c "cd /src/cubical && nosetests --with-xunit --xunit-file /workspace/nosetests.xml test"
 done
\ No newline at end of file
diff --git a/cubical/kernels/cygenerics.pyx b/cubical/kernels/cygenerics.pyx
index d1b3418f..b2ce6895 100644
--- a/cubical/kernels/cygenerics.pyx
+++ b/cubical/kernels/cygenerics.pyx
@@ -189,7 +189,7 @@ def half_baselines(int n_ant):
     global _half_baselines_view
     cdef int i
     if n_ant != _half_baselines_nant:
-        nbl = n_ant*(n_ant-1)/2
+        nbl = n_ant*(n_ant-1)//2
         _half_baselines = np.empty((nbl,2),np.int32)
         _half_baselines_view = _half_baselines
         i = 0
diff --git a/requirements3.txt b/requirements3.txt
new file mode 100644
index 00000000..1f73bc94
--- /dev/null
+++ b/requirements3.txt
@@ -0,0 +1,3 @@
+numpy >= 1.11.3
+cython >= 0.25.2
+python-casacore >= 2.1.2
\ No newline at end of file
diff --git a/setup.py b/setup.py
index c7ce93d8..5cc07df3 100644
--- a/setup.py
+++ b/setup.py
@@ -152,7 +152,7 @@ def run(self):
     requirements = ['future',
                     'numpy',
                     'futures',
-                    'python-casacore>=2.1.2',
+                    'python-casacore>=2.1.2' if six.PY2 else 'python-casacore<=3.0.0',
                     'sharedarray',
                     'matplotlib<3.0',
                     'cython',
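The `/` to `//` substitutions running through patches 14 and 15 are behavioural, not cosmetic: with the kernels now cythonized at language level 3, `/` between integers is true division, and the result can no longer index an array or size an allocation. A small pure-Python illustration of the failure mode (values invented, not project data):

    t, t_int = 7, 2
    rr = t / t_int                   # 3.5 on Python 3: a float, invalid as an index
    rr = t // t_int                  # 3: floor division recovers the interval index

    n_ant = 8
    nbl = n_ant * (n_ant - 1) // 2   # 28 baselines; `/` would have given 28.0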
From b55a6a4c539faa3cef12f9e6e03044c4e1c434b7 Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Fri, 24 May 2019 19:50:21 +0200
Subject: [PATCH 16/25] Ensure CC 3.0 is installed

---
 requirements3.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements3.txt b/requirements3.txt
index 1f73bc94..519a02d7 100644
--- a/requirements3.txt
+++ b/requirements3.txt
@@ -1,3 +1,3 @@
 numpy >= 1.11.3
 cython >= 0.25.2
-python-casacore >= 2.1.2
\ No newline at end of file
+python-casacore <= 3.0.0
\ No newline at end of file

From 9afe9b0696b826c7e5cd2cd13999db1796d1d32d Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Tue, 28 May 2019 18:24:29 +0200
Subject: [PATCH 17/25] Py2 and Py3 compatible changes

---
 .jenkins/1604.py2.docker                     | 13 ++-
 .jenkins/1804.py2.docker                     | 12 +--
 .jenkins/1804.py3.docker                     | 10 +--
 MANIFEST.in                                  |  2 +-
 cubical/data_handler/TiggerSourceProvider.py | 52 ++++++-----
 cubical/data_handler/ms_tile.py              | 14 ++-
 cubical/database/casa_db_adaptor.py          | 27 +++++-
 docs/installation.rst                        | 95 +++++---------------
 pyproject.toml                               |  2 +
 requirements.test.txt                        |  2 -
 requirements.txt                             |  4 -
 requirements3.txt                            |  3 -
 setup.py                                     | 28 +++---
 13 files changed, 114 insertions(+), 150 deletions(-)
 create mode 100644 pyproject.toml
 delete mode 100644 requirements.test.txt
 delete mode 100644 requirements.txt
 delete mode 100644 requirements3.txt

diff --git a/.jenkins/1604.py2.docker b/.jenkins/1604.py2.docker
index 28b22fac..fa212b34 100644
--- a/.jenkins/1604.py2.docker
+++ b/.jenkins/1604.py2.docker
@@ -7,7 +7,9 @@ ENV DEB_DEPENDENCIES casacore-dev \
     casacore-data \
     build-essential \
     python-pip \
     libboost-all-dev \
     wcslib-dev \
-    libcfitsio3-dev
+    libcfitsio3-dev \
+    git
+
 RUN apt-get update
 RUN apt-get install -y $DEB_DEPENDENCIES
 RUN pip install -U pip wheel setuptools
@@ -25,18 +27,13 @@ ADD HEADER /src/cubical/HEADER
 ADD LICENSE.md /src/cubical/LICENSE.md
 ADD MANIFEST.in /src/cubical/MANIFEST.in
 ADD README.md /src/cubical/README.md
-ADD requirements.txt /src/cubical/requirements.txt
-ADD requirements.test.txt /src/cubical/requirements.test.txt
 ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt
 ADD setup.py /src/cubical/setup.py
+ADD pyproject.toml /src/cubical/pyproject.toml
 
 WORKDIR /src/cubical
 RUN pip install -U pip setuptools wheel
-RUN pip install -r requirements.txt
-RUN apt-get install -y git
-RUN pip install -r requirements.test.txt
-RUN python setup.py gocythonize
-RUN pip install -U .
+RUN pip install .
 
 ENTRYPOINT ["gocubical"]
 CMD ["--help"]
diff --git a/.jenkins/1804.py2.docker b/.jenkins/1804.py2.docker
index bf98dc88..d99e8169 100644
--- a/.jenkins/1804.py2.docker
+++ b/.jenkins/1804.py2.docker
@@ -7,7 +7,8 @@ ENV DEB_DEPENDENCIES casacore-dev \
     casacore-data \
     build-essential \
     python-pip \
     libboost-all-dev \
     wcslib-dev \
-    libcfitsio-dev
+    libcfitsio-dev \
+    git
 RUN apt-get update
 RUN apt-get install -y $DEB_DEPENDENCIES
 RUN pip install -U pip wheel setuptools
@@ -25,18 +26,13 @@ ADD HEADER /src/cubical/HEADER
 ADD LICENSE.md /src/cubical/LICENSE.md
 ADD MANIFEST.in /src/cubical/MANIFEST.in
 ADD README.md /src/cubical/README.md
-ADD requirements.txt /src/cubical/requirements.txt
-ADD requirements.test.txt /src/cubical/requirements.test.txt
 ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt
 ADD setup.py /src/cubical/setup.py
+ADD pyproject.toml /src/cubical/pyproject.toml
 
 WORKDIR /src/cubical
 
-RUN pip install -r requirements.txt
-RUN apt-get install -y git
-RUN pip install -r requirements.test.txt
-RUN python2.7 setup.py gocythonize
-RUN pip install -U .
+RUN pip install .
 
 ENTRYPOINT ["gocubical"]
 CMD ["--help"]
diff --git a/.jenkins/1804.py3.docker b/.jenkins/1804.py3.docker
index ce398bee..5d3cdc51 100644
--- a/.jenkins/1804.py3.docker
+++ b/.jenkins/1804.py3.docker
@@ -7,6 +7,7 @@ ENV DEB_DEPENDENCIES casacore-dev \
     python3-pip \
     libboost-all-dev \
     wcslib-dev \
+    git \
     libcfitsio-dev
 RUN apt-get update
 RUN apt-get install -y $DEB_DEPENDENCIES
@@ -26,17 +26,12 @@ ADD HEADER /src/cubical/HEADER
 ADD LICENSE.md /src/cubical/LICENSE.md
 ADD MANIFEST.in /src/cubical/MANIFEST.in
 ADD README.md /src/cubical/README.md
-ADD requirements3.txt /src/cubical/requirements3.txt
-ADD requirements.test.txt /src/cubical/requirements.test.txt
 ADD rtd_requirements.txt /src/cubical/rtd_requirements.txt
 ADD setup.py /src/cubical/setup.py
+ADD pyproject.toml /src/cubical/pyproject.toml
 
 WORKDIR /src/cubical
-RUN python3.6 -m pip install -r requirements3.txt
-RUN apt-get install -y git
-RUN python3.6 -m pip install -r requirements.test.txt
-RUN python3.6 setup.py gocythonize
-RUN python3.6 -m pip install -U .
+RUN python3.6 -m pip install .
 
 ENTRYPOINT ["gocubical"]
 CMD ["--help"]
diff --git a/MANIFEST.in b/MANIFEST.in
index 18da2f8e..1ed60ff8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
 include cubical/DefaultParset.cfg
 include cubical/database/blankcaltable.CASA.tgz
-recursive-include cython *
+recursive-include cubical/kernels *.pyx *.pxi *.c *.cpp *.h
 include cubical/bin/gocubical
diff --git a/cubical/data_handler/TiggerSourceProvider.py b/cubical/data_handler/TiggerSourceProvider.py
index 18b30a53..071bae2e 100644
--- a/cubical/data_handler/TiggerSourceProvider.py
+++ b/cubical/data_handler/TiggerSourceProvider.py
@@ -100,7 +100,7 @@ def point_lm(self, context):
 
         for ind, source in enumerate(self._pnt_sources[lp:up]):
             ra, dec = source.pos.ra, source.pos.dec
-            lm[ind,0], lm[ind,1] = radec_to_lm(ra, dec, self._phase_center)
+            lm[ind,0], lm[ind,1] = ra, dec #radec_to_lm(ra, dec, self._phase_center)
 
         return lm
 
@@ -168,7 +168,7 @@ def gaussian_lm(self, context):
 
         for ind, source in enumerate(self._gau_sources[lg:ug]):
             ra, dec = source.pos.ra, source.pos.dec
-            lm[ind, 0], lm[ind, 1] = radec_to_lm(ra, dec, self._phase_center)
+            lm[ind, 0], lm[ind, 1] = ra, dec #radec_to_lm(ra, dec, self._phase_center)
 
         return lm
 
@@ -215,6 +215,12 @@ def updated_dimensions(self):
 
         return [('npsrc', self._npsrc),
                 ('ngsrc', self._ngsrc)]
+
+    def phase_centre(self, context):
+        """ Sets the MB phase direction """
+        radec = np.array([self._phase_center[...,-2],
+                          self._phase_center[...,-1]], np.float32)
+        return radec
 
 def cluster_sources(sm, dde_tag):
     """
@@ -252,29 +258,31 @@ def cluster_sources(sm, dde_tag):
 
     return clus
 
-def radec_to_lm(ra, dec, phase_center):
-    """
-    Convert right-ascension and declination to direction cosines.
+# def radec_to_lm(ra, dec, phase_center):
+#     """
+#     DEPRECATED: Montblanc now implements WCS conversions internally
 
-    Args:
-        ra (float):
-            Right-ascension in radians.
-        dec (float):
-            Declination in radians.
-        phase_center (np.ndarray):
-            The coordinates of the phase center.
+#     Convert right-ascension and declination to direction cosines.
 
-    Returns:
-        tuple:
-            l and m coordinates.
+#     Args:
+#         ra (float):
+#             Right-ascension in radians.
+#         dec (float):
+#             Declination in radians.
+#         phase_center (np.ndarray):
+#             The coordinates of the phase center.
 
-    """
+#     Returns:
+#         tuple:
+#             l and m coordinates.
 
-    delta_ra = ra - phase_center[...,-2]
-    dec_0 = phase_center[...,-1]
+#     """
 
-    l = np.cos(dec)*np.sin(delta_ra)
-    m = np.sin(dec)*np.cos(dec_0) -\
-        np.cos(dec)*np.sin(dec_0)*np.cos(delta_ra)
+#     delta_ra = ra - phase_center[...,-2]
+#     dec_0 = phase_center[...,-1]
 
-    return l, m
+#     l = np.cos(dec)*np.sin(delta_ra)
+#     m = np.sin(dec)*np.cos(dec_0) -\
+#         np.cos(dec)*np.sin(dec_0)*np.cos(delta_ra)
 
+#     return l, m
diff --git a/cubical/data_handler/ms_tile.py b/cubical/data_handler/ms_tile.py
index 349bf560..78e3d844 100644
--- a/cubical/data_handler/ms_tile.py
+++ b/cubical/data_handler/ms_tile.py
@@ -132,7 +132,7 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo
 
         # Given data, we need to make sure that it looks the way MB wants it to.
         # First step - check the number of rows.
-        n_bl = (self.nants * (self.nants - 1)) / 2
+        n_bl = (self.nants * (self.nants - 1)) // 2
 
         uniq_times = np.unique(self.times)
         ntime = len(uniq_times)
         uniq_time_col = np.unique(self.time_col)
@@ -147,8 +147,16 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo
 
         ddid_index, uniq_ddids, _ = data_handler.uniquify(self.ddid_col)
 
         self._freqs = np.array([self.tile.dh.chanfreqs[ddid] for ddid in uniq_ddids])
-
-        self._row_identifiers = ddid_index * n_bl * ntime + (self.times - self.times[0]) * n_bl + \
+
+        def timestep_index(times, tol=1.0e-9):
+            """ Compute the prescan operation to find the unique timestep identifiers for
+                a TIME_CENTROID array """
+            tindx = np.zeros_like(times, dtype=np.int64)
+            tindx[1:] = np.abs(times[1:] - times[:-1]) > tol
+            tindx = np.add.accumulate(tindx)
+            return tindx
+
+        self._row_identifiers = ddid_index * n_bl * ntime + timestep_index(self.times) * n_bl + \
                                 (-0.5 * self.antea ** 2 + (self.nants - 1.5) * self.antea + self.anteb - 1).astype(
             np.int32)
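The `timestep_index` helper introduced above computes row-to-timestep ordinals with a prescan: mark each row whose TIME_CENTROID jumps by more than `tol` relative to the previous row, then take a cumulative sum, so every row carries the index of the timestep it belongs to. A toy run with invented times (not project data):

    import numpy as np

    times = np.array([0.0, 0.0, 8.0, 8.0, 8.0, 16.0])
    tindx = np.zeros_like(times, dtype=np.int64)
    tindx[1:] = np.abs(times[1:] - times[:-1]) > 1.0e-9  # 1 where a new timestep starts
    tindx = np.add.accumulate(tindx)                     # -> [0, 0, 1, 1, 1, 2]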
diff --git a/cubical/database/casa_db_adaptor.py b/cubical/database/casa_db_adaptor.py
index 51b90712..6d780efb 100644
--- a/cubical/database/casa_db_adaptor.py
+++ b/cubical/database/casa_db_adaptor.py
@@ -11,6 +11,7 @@
 import shutil
 import numpy as np
 import subprocess
+import six
 
 log = logger.getLogger("casa_db_adaptor")
 
@@ -40,6 +41,9 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr
             is_complex: Solutions are complex or real-valued
             viscal_label: Sets viscal property of CASA table - used as identifier in CASA
         """
+        if six.PY3:
+            log.error("Gaintables cannot be written in Python 3 mode due to current casacore implementation issues")
+            return
         if os.path.exists(filename):
             if os.path.isfile(filename):
                 log.error("CASA calibration table destination already exists but is not a directory. Will not remove.")
@@ -74,7 +78,7 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr
             t.putcol("REFERENCE_DIR", np.tile(db.fieldrefdir[selfield], (field_ndir, 1)))
             t.putcol("CODE", np.tile(np.array(db.fieldcode)[selfield], (field_ndir, 1)))
             t.putcol("FLAG_ROW", np.tile(db.fieldflagrow[selfield], (field_ndir, 1)))
-            t.putcol("NAME", np.array(["%s_DIR_%d" % (f, fdi) for fdi, f in enumerate([db.fieldname[np.where(selfield)[0][0]]] * field_ndir)]).T)
+            t.putcol("NAME", np.array(map(str, ["%s_DIR_%d" % (f, fdi) for fdi, f in enumerate([db.fieldname[np.where(selfield)[0][0]]] * field_ndir)])).T)
             t.putcol("SOURCE_ID", np.tile(db.fieldsrcid[selfield], (field_ndir, 1)) + np.arange(field_ndir).T)
             t.putcol("TIME", np.tile(db.fieldtime[selfield], (field_ndir, 1)))
 
@@ -119,7 +123,7 @@ def init_empty(cls, db, filename, solfreqs, solants, field_ndir=1, is_complex=Tr
                 t.putcell("FREQ_GROUP", iddid, db.spwfreqgroup[spwid])
                 t.putcell("FREQ_GROUP_NAME", iddid, db.spwfreqgroupname[spwid])
                 t.putcell("IF_CONV_CHAIN", iddid, db.spwifconvchain[spwid])
-                t.putcell("NAME", iddid, db.spwname[spwid])
+                t.putcell("NAME", iddid, str(db.spwname[spwid]))
                 t.putcell("NET_SIDEBAND", iddid, db.spwnetsideband[spwid])
                 t.putcell("NUM_CHAN", iddid, ddsolfreqs.size)
                 t.putcell("TOTAL_BANDWIDTH", iddid, maxfreq - minfreq)
@@ -144,6 +148,9 @@ def create_G_table(cls, db, gname, outname = "Gphase"):
             gname: name of pickled_db solutions to export
             outname: suffix of exported CASA gaintable
         """
+        if six.PY3:
+            log.error("Gaintables cannot be written in Python 3 mode due to current casacore implementation issues")
+            return
         if np.prod(db[gname].shape) == 0:
             log.warn("No %s solutions. Will not write CASA table" % gname)
             return
@@ -217,6 +224,9 @@ def create_B_table(cls, db, gname, outname = "B", diag=True):
             outname: suffix of exported CASA gaintable
             diag: Write out diagonal of Jones matrix if true, off-diagonal (leakage) terms otherwise.
         """
+        if six.PY3:
+            log.error("Gaintables cannot be written in Python 3 mode due to current casacore implementation issues")
+            return
         if np.prod(db[gname].shape) == 0:
             log.warn("No %s solutions. Will not write CASA table" % gname)
             return
@@ -285,6 +295,9 @@ def create_D_table(cls, db, gname, outname = "D"):
             gname: name of pickled_db solutions to export
             outname: suffix of exported CASA gaintable
         """
+        if six.PY3:
+            log.error("Gaintables cannot be written in Python 3 mode due to current casacore implementation issues")
+            return
         cls.create_B_table(db, gname, outname, diag=False)
 
     @classmethod
@@ -297,6 +310,9 @@ def create_K_table(cls, db, gname, outname = "K"):
             gname: name of pickled_db solutions to export
             outname: suffix of exported CASA gaintable
         """
+        if six.PY3:
+            log.error("Gaintables cannot be written in Python 3 mode due to current casacore implementation issues")
+            return
         if np.prod(db[gname].shape) == 0:
             log.warn("No %s solutions. Will not write CASA table" % gname)
             return
@@ -382,6 +398,9 @@ def set_metadata(self, src):
         Args:
             src: a cubical.data_handler instance
         """
+        if six.PY3:
+            log.error("Gaintables cannot be written in Python 3 mode due to current casacore implementation issues")
+            return
         if not isinstance(src, MSDataHandler):
             raise TypeError("src must be of type Cubical DataHandler")
 
@@ -436,6 +455,9 @@ def set_metadata(self, src):
 
     def __export(self):
         """ exports the database to CASA gaintables """
+        if six.PY3:
+            log.error("Gaintables cannot be written in Python 3 mode due to current casacore implementation issues")
+            return
         self._load(self.filename)
 
         if not self.meta_avail:
@@ -459,7 +481,6 @@ def __export(self):
 
     def close(self):
         """ see iface_database.close() for details """
-
         # move to closed state before exporting and loading back and sorting data
         do_export = (self.mode is "create")
         PickledDatabase.close(self)
diff --git a/docs/installation.rst b/docs/installation.rst
index 15bcd930..0246e56a 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -2,88 +2,37 @@
 Requirements and Installation
 *****************************
 
-Ubuntu 18.04 and 16.04
-~~~~~~~~~~~~~~~~~~~~~~
-
-CubiCal depends on python-casacore, the dependencies of which should be
-installed from the KERN-3 ppa. Note that ``apt-get install`` is ``apt install``
-on 18.04.
-
-.. code:: bash
-
-   apt-get install software-properties-common
-   apt-add-repository -s ppa:kernsuite/kern-3
-   apt-add-repository multiverse
-   apt-add-repository restricted
-   apt-get update
-   apt-get install -y casacore-dev libboost-python-dev libcfitsio3-dev wcslib-dev
-
-If you wish to install CubiCal in a virtual environment (recommended), see
-`Using a virtual environment`_.
-
-.. note::
-
-   At this point, if CubiCal is required to predict model visiblities, it is necessary
-   to install Montblanc. To install Montblanc, follow the instructions here_.
-
-   .. _here: https://montblanc.readthedocs.io
-
-CubiCal can now be installed from PyPI by running the following:
-
-.. code:: bash
-
-   pip install cubical
-
-.. warning::
-
-   To install in development mode, you will need to install some dependencies manually
-   and cythonize the development kernels explicitly. Assuming that you have already
-   cloned the repository, this can be done as follows:
-
-   .. code:: bash
-
-      pip install cython numpy
-      python path/to/repo/setup.py gocythonize
-      pip install -e path/to/repo/
-
-   The explicit cythonization step also allows for forced recythonization via ``--force`` or ``-f``:
-
-   .. code:: bash
-
-      python path/to/repo/setup.py gocythonize -f
-
-
-Ubuntu 14.04
+Ubuntu 18.04
 ~~~~~~~~~~~~
 
 CubiCal depends on python-casacore, the dependencies of which should be
-installed from the radio-astro ppa.
+installed from the KERN-5 ppa. Note that ``apt-get install`` is ``apt install``
+on 18.04.
 
 .. code:: bash
 
-   apt-get install software-properties-common
-   apt-add-repository -s ppa:radio-astro/main
-   apt-add-repository multiverse
-   apt-add-repository restricted
+   ENV DEB_DEPENDENCIES casacore-dev \
+       casacore-data \
+       build-essential \
+       python3-pip \
+       libboost-all-dev \
+       wcslib-dev \
+       git \
+       libcfitsio-dev
    apt-get update
-   apt-get install -y libboost-python-dev libcfitsio3-dev wcslib-dev libcasacore2-dev
+   apt-get install -y $DEB_DEPENDENCIES
+   pip3 install -U pip wheel setuptools
+   python3.6 -m pip install -U .
+   python3.6 -m pip install path/to/repo/
 
 If you wish to install CubiCal in a virtual environment (recommended), see
 `Using a virtual environment`_.
 
-.. warning::
-
-   A special requirement on 14.04 is the installation of a specific version of python-casacore
-   (to match the version of casacore in radio-astro). To install this dependency run:
-
-   .. code:: bash
-
-      pip install python-casacore==2.1.2
-
 .. note::
 
    At this point, if CubiCal is required to predict model visiblities, it is necessary
-   to install Montblanc. To install Montblanc, follow the instructions here_.
+   to install Montblanc. The CPU version of montblanc is installed automatically if montblanc has not been previously
+   installed. To install the GPU version of Montblanc, follow the instructions here_ before installing cubical.
 
    .. _here: https://montblanc.readthedocs.io
 
@@ -101,8 +50,6 @@ CubiCal can now be installed from PyPI by running the following:
 
    .. code:: bash
 
-      pip install cython numpy
-      python path/to/repo/setup.py gocythonize
       pip install -e path/to/repo/
 
    The explicit cythonization step also allows for forced recythonization via ``--force`` or ``-f``:
@@ -115,17 +62,17 @@ Using a virtual environment
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Installing CubiCal in a virtual enviroment is highly recommended. To install
-virtualenv using pip, run:
+virtualenv using apt, run:
 
 .. code:: bash
 
-   pip install virtualenv
+   apt-get install python3-virtualenv
 
 To create a virtualenv, run:
 
 .. code:: bash
 
-   virtualenv path/to/env/name
+   virtualenv -p python3 path/to/env/name
 
 Activate the environment using:
 
@@ -139,6 +86,6 @@ It is often necessary to update pip, setuptools and wheel inside the environment
 
 .. code:: bash
 
-   pip install -U pip setuptools wheel
+   pip3 install -U pip setuptools wheel
 
 Return to `Ubuntu 18.04 and 16.04`_ or `Ubuntu 14.04`_ to continue with installation.
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..dc69929f
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,2 @@
+[build-system]
+requires = ["setuptools", "wheel", "cython", "six", "numpy"]
\ No newline at end of file
diff --git a/requirements.test.txt b/requirements.test.txt
deleted file mode 100644
index 2b048e4d..00000000
--- a/requirements.test.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-git+https://github.com/ska-sa/montblanc.git@ddfacet
-nose >= 1.3.7
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 74e39a19..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-numpy >= 1.11.3
-cython >= 0.25.2
-futures >= 3.0.5
-python-casacore >= 2.1.2
diff --git a/requirements3.txt b/requirements3.txt
deleted file mode 100644
index 519a02d7..00000000
--- a/requirements3.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy >= 1.11.3
-cython >= 0.25.2
-python-casacore <= 3.0.0
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 5cc07df3..3cfa6684 100644
--- a/setup.py
+++ b/setup.py
@@ -54,23 +54,17 @@
     from Cython.Build import cythonize
     import Cython.Compiler.Options as CCO
 except ImportError:
-    cythonize = None
+    raise ImportError("Please install cython before running install. If you're using pip 19 to install this package you should not be seeing this message")
 
-preinstall_dependencies = ["'six >= 1.12.0'"]
 try:
     import six
-except ImportError as e:
-    import subprocess
-    import pip
-
-    subprocess.call(["cd .. && pip install %s" %
-                     (" ".join(preinstall_dependencies)), ""], shell=True)
-    subprocess.call(["cd .. && pip3 install %s" %
-                     (" ".join(preinstall_dependencies)), ""], shell=True)
-    try:
-        import six
-    except ImportError as e:
-        raise ImportError("Six autoinstall failed. Please install Python 2.x compatibility package six before running Cubical install")
+except ImportError:
+    raise ImportError("Please install six before running install. If you're using pip 19 to install this package you should not be seeing this message")
 
+try:
+    import numpy
+except ImportError:
+    raise ImportError("Please install numpy before running install. If you're using pip 19 to install this package you should not be seeing this message")
 
 cmpl_args = ['-ffast-math',
              '-O2',
             '-march=native',
@@ -153,11 +147,15 @@ def run(self):
                     'numpy',
                     'futures',
                     'python-casacore>=2.1.2' if six.PY2 else 'python-casacore<=3.0.0',
-                    'sharedarray',
+                    'sharedarray @ git+https://gitlab.com/bennahugo/shared-array.git@master',
                     'matplotlib<3.0',
                     'cython',
                     'scipy',
-                    'astro-tigger-lsm']
+                    'astro-tigger-lsm',
+                    'six',
+                    'montblanc @ git+https://github.com/ska-sa/montblanc.git@ddfacet']
+    if six.PY2:
+        requirements.append('futures')
 
 setup(name='cubical',
       version=cubical.VERSION,
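The `sharedarray @ git+...` and `montblanc @ git+...` entries above are PEP 508 direct references, which pip resolves straight from the named git repositories instead of PyPI. A sketch of how such a requirement string parses, using the third-party `packaging` library (assumed installed; it is not a dependency of this patch series):

    from packaging.requirements import Requirement

    req = Requirement(
        "sharedarray @ git+https://gitlab.com/bennahugo/shared-array.git@master")
    print(req.name)  # 'sharedarray'
    print(req.url)   # 'git+https://gitlab.com/bennahugo/shared-array.git@master'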
From 91dc2752f2fc1a9312e3ed362a4cb2a5fd77817a Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Tue, 28 May 2019 19:59:29 +0200
Subject: [PATCH 18/25] Make test relative

---
 test/d147_test.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/test/d147_test.py b/test/d147_test.py
index 733e48eb..763c2490 100644
--- a/test/d147_test.py
+++ b/test/d147_test.py
@@ -37,7 +37,7 @@ def generate_reference(self, colname, args=[], **kw):
         if retcode:
             raise RuntimeError("gocubical failed, return code {}".format(retcode))
 
-    def verify(self, refcolname, args=[], tolerance=1e-6, **kw):
+    def verify(self, refcolname, args=[], mean_tolerance=-30, ninetyfifth_tolerance=-25, **kw):
         cmd = self.cmdline + kw_to_args(data_ms=self.msname, out_column="CORRECTED_DATA", out_name="test_"+refcolname+"/cc", **kw) + \
               " " + " ".join(args)
         logprint("*** running {}".format(cmd))
@@ -48,10 +48,15 @@ def verify(self, refcolname, args=[], mean_tolerance=-30, ninetyfifth_tolerance
         if not np.isfinite(cd).all():
             raise RuntimeError("{}: NaNs/INFs detected in output data".format(cmd))
         c0 = table(self.refmsname).getcol(refcolname)
-        diff = abs(cd-c0).max()
-        logprint("*** max diff between CORRECTED_DATA and {} is {}".format(refcolname, diff))
-        if not diff <= tolerance:
-            raise RuntimeError("{}: diff {} exceeds tolerance of {}".format(cmd, diff, tolerance))
+        diff = abs(abs(cd-c0)/abs(c0))
+        diffmean = 10*np.log10(np.nanmean(diff))
+        logprint("*** mean relative diff between CORRECTED_DATA and {} is {} dB".format(refcolname, diffmean))
+        if diffmean > mean_tolerance:
+            raise RuntimeError("{}: diff {} dB exceeds tolerance of {} dB".format(cmd, diffmean, mean_tolerance))
+        diff95 = 10*np.log10(np.nanpercentile(diff, 95.0))
+        logprint("*** ninety fifth percentile relative diff between CORRECTED_DATA and {} is {} dB".format(refcolname, diff95))
+        if diff95 > ninetyfifth_tolerance:
+            raise RuntimeError("{}: diff {} dB exceeds tolerance of {} dB".format(cmd, diff95, ninetyfifth_tolerance))
 
 d147_test_list = [
     ("GSOL_DATA", dict()),
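Patch 18 replaces the absolute max-difference check with statistics on the relative error, expressed in decibels: `10*log10` of the mean relative error must stay below -30 dB (one part in a thousand) and the 95th percentile below -25 dB. A toy recomputation of those figures on synthetic columns (invented values, not the D147 data):

    import numpy as np

    c0 = np.array([1.0 + 0j, 2.0 + 0j, 4.0 + 0j])      # stand-in reference column
    cd = c0 * (1 + 1e-3)                               # output with 0.1% relative error
    rel = np.abs(np.abs(cd - c0) / np.abs(c0))
    print(10 * np.log10(np.nanmean(rel)))              # -30.0 dB, right at the threshold
    print(10 * np.log10(np.nanpercentile(rel, 95.0)))  # likewise -30.0 dB here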
From 15ccd33c0f7439061401d53a9591ca540f3fcb7d Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Wed, 29 May 2019 12:45:29 +0200
Subject: [PATCH 19/25] Add asserts to verify types in ms_tile

---
 cubical/data_handler/__init__.py        |  2 +-
 cubical/data_handler/ms_data_handler.py |  4 ++--
 cubical/data_handler/ms_tile.py         | 22 +++++++++++----------
 cubical/tools/dtype_checks.py           | 26 +++++++++++++++++++++++++
 4 files changed, 41 insertions(+), 13 deletions(-)
 create mode 100644 cubical/tools/dtype_checks.py

diff --git a/cubical/data_handler/__init__.py b/cubical/data_handler/__init__.py
index 2bd6d286..b31db40e 100644
--- a/cubical/data_handler/__init__.py
+++ b/cubical/data_handler/__init__.py
@@ -14,7 +14,7 @@ def uniquify(values):
 
     Returns tuple of indices, unique_values, rmap
     """
-    uniq = np.array(sorted(set(values)))
+    uniq = np.unique(values) #np.array(sorted(set(values)))
     rmap = {x: i for i, x in enumerate(uniq)}
     # apply this map to the time column to construct a timestamp column
     indices = np.fromiter(list(map(rmap.__getitem__, values)), int)
diff --git a/cubical/data_handler/ms_data_handler.py b/cubical/data_handler/ms_data_handler.py
index 6a3377fc..3355b0cb 100644
--- a/cubical/data_handler/ms_data_handler.py
+++ b/cubical/data_handler/ms_data_handler.py
@@ -88,9 +88,9 @@ def _parse_range(arg, nmax):
     Raises:
         TypeError:
             If the type of arg is not understood.
         ValueError:
             If the range cannot be parsed.
 
     """
     fullrange = list(range(nmax))
diff --git a/cubical/data_handler/ms_tile.py b/cubical/data_handler/ms_tile.py
index 78e3d844..361a64b7 100644
--- a/cubical/data_handler/ms_tile.py
+++ b/cubical/data_handler/ms_tile.py
@@ -10,6 +10,7 @@
 from cubical.tools import shared_dict
 from cubical.flagging import FL
 from cubical import data_handler
+from cubical.tools import dtype_checks as dtc
 
 from cubical.tools import logger, ModColor
 log = logger.getLogger("data_handler")
@@ -125,6 +126,12 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo
 
         from . import MBTiggerSim
         from montblanc.impl.rime.tensorflow.sources import CachedSourceProvider, FitsBeamSourceProvider
+        assert dtc.assert_isint(self.times), self.times.dtype
+        assert dtc.assert_isint(self.antea), self.antea.dtype
+        assert dtc.assert_isint(self.nants)
+        assert dtc.assert_isint(self.anteb), self.anteb.dtype
+        assert dtc.assert_isfp(self.time_col), self.time_col.dtype
+        assert dtc.assert_isint(self.ddid_col), self.ddid_col.dtype
 
         # setup montblanc machinery once per subset (may be called multiple times for different models)
         if not self._mb_measet_src:
@@ -148,18 +155,13 @@ def load_montblanc_models(self, uvwco, loaded_models, model_source, cluster, imo
 
         ddid_index, uniq_ddids, _ = data_handler.uniquify(self.ddid_col)
 
         self._freqs = np.array([self.tile.dh.chanfreqs[ddid] for ddid in uniq_ddids])
-
-        def timestep_index(times, tol=1.0e-9):
-            """ Compute the prescan operation to find the unique timestep identifiers for
-                a TIME_CENTROID array """
-            tindx = np.zeros_like(times, dtype=np.int64)
-            tindx[1:] = np.abs(times[1:] - times[:-1]) > tol
-            tindx = np.add.accumulate(tindx)
-            return tindx
-
-        self._row_identifiers = ddid_index * n_bl * ntime + timestep_index(self.times) * n_bl + \
+
+        assert dtc.assert_isint(n_bl)
+        assert dtc.assert_isint(ddid_index)
+
+        self._row_identifiers = ddid_index * n_bl * ntime + (self.times - self.times[0]) * n_bl + \
                                 (-0.5 * self.antea ** 2 + (self.nants - 1.5) * self.antea + self.anteb - 1).astype(
             np.int32)
-
         # make full list of row indices in Montblanc-compliant order (ddid-time-ant1-ant2)
         full_index = [(p, q, t, d) for d in range(len(uniq_ddids)) for t in uniq_times
                       for p in range(self.nants) for q in range(self.nants)
diff --git a/cubical/tools/dtype_checks.py b/cubical/tools/dtype_checks.py
new file mode 100644
index 00000000..e5ec19f8
--- /dev/null
+++ b/cubical/tools/dtype_checks.py
@@ -0,0 +1,26 @@
+import six
+import numpy as np
+
+if six.PY3:
+    long = int
+
+def assert_isint(v):
+    if isinstance(v, (int, long, bool)):
+        return True
+    elif isinstance(v, list):
+        return all(map(lambda x: isinstance(x, (int, long, bool)), v))
+    elif isinstance(v, np.ndarray):
+        return v.dtype in [np.int, np.int16, np.int32, np.int64, np.int8,
+                           np.int_, np.intc, np.integer, np.bool, int, long]
+    else:
+        return False
+
+def assert_isfp(v):
+    if isinstance(v, float):
+        return True
+    elif isinstance(v, list):
+        return all(map(lambda x: isinstance(x, float), v))
+    elif isinstance(v, np.ndarray):
+        return v.dtype in [np.float, np.float_, np.float64, np.float16, np.float32, np.double]
+    else:
+        return False
\ No newline at end of file
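The new `dtype_checks` module gives the `assert dtc.assert_isint(...)` guards in `ms_tile.py` one place to answer whether a payload is integer-typed or floating-point, for scalars, lists and ndarrays alike, with `long` aliased to `int` on py3. A usage sketch (arrays invented):

    import numpy as np
    from cubical.tools import dtype_checks as dtc

    assert dtc.assert_isint(np.arange(10))              # int64 array -> True
    assert dtc.assert_isfp(np.linspace(0.0, 1.0, 5))    # float64 array -> True
    assert not dtc.assert_isint(np.linspace(0.0, 1.0, 5))
    assert dtc.assert_isint([1, 2, 3]) and dtc.assert_isfp(0.5)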
From 2723e27d5f37c7fb797e7a1fedcbed7d036cb0c5 Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Wed, 29 May 2019 16:30:40 +0200
Subject: [PATCH 20/25] fix pip install from a non-cythonized source directory

---
 setup.py | 54 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 43 insertions(+), 11 deletions(-)

diff --git a/setup.py b/setup.py
index 3cfa6684..8adc0bf3 100644
--- a/setup.py
+++ b/setup.py
@@ -95,8 +95,8 @@ def initialize_options(self):
     def finalize_options(self):
         self.force = self.force or 0
 
-    def run(self):
-
+    @classmethod
+    def populate_extensions(cls):
         if not cythonize:
             raise Exception("Cython not available, please install first.")
 
@@ -116,17 +116,49 @@ def run(self):
                           extra_compile_args=cmpl_args_omp if omp else cmpl_args,
                           extra_link_args=link_args_omp if omp else link_args,
                           language="c++" if cpp else "c"))
+
+        return extensions
 
+    def run(self):
+        extensions = gocythonize.populate_extensions()
         cythonize(extensions, compiler_directives={'binding': True, 'language_level' : "3" if six.PY3 else "2"}, annotate=True, force=self.force)
 
+# the default build_ext only builds extensions specified through the ext_modules list
+# however to be absolutely safe for wheel building from source using pip v19 cythonization
+# must be invoked to check that all the necessary cxx and c files have been created,
+# otherwise they need to be first created before the extension modules are compiled using
+# the normal cxx and c compiler invoked by the Extension class of setuptools
+
+class custom_build_ext(build_ext, gocythonize):
+    """ Build all extension modules """
+
+    description = 'Cythonise CubiCal kernels and build thereafter with the c/cxx compiler'
+
+    user_options = [('force', 'f', 'Force cythonisation.')]
+
+    def __init__(self, *args, **kwargs):
+        build_ext.__init__(self, *args, **kwargs)
+
+    def initialize_options(self):
+        build_ext.initialize_options(self)
+        gocythonize.initialize_options(self)
+
+    def finalize_options(self):
+        build_ext.finalize_options(self)
+        gocythonize.finalize_options(self)
+
+    def run(self):
+        gocythonize.run(self) # first cythonize (if needed)
+        build_ext.run(self) # then GNU build
+
-extensions = []
+c_cpp_extensions = []
 for source in glob.glob("cubical/kernels/*.pyx"):
     name, _ = os.path.splitext(source)
     is_cpp = any([s in name for s in cpp_extensions])
     is_omp = name.endswith("_omp")
-    extensions.append(
+    c_cpp_extensions.append(
         Extension(name.replace("/","."), [name + ".cpp" if is_cpp else name + ".c"],
                   include_dirs=[include_path],
                   extra_compile_args=cmpl_args_omp if is_omp else cmpl_args,
 
 if on_rtd:
     requirements = ['numpy',
                     'cython',
-                    'futures',
+                    'futures; python_version <= "2.7"',
                     'matplotlib',
                     'scipy']
 else:
     requirements = ['future',
                     'numpy',
-                    'futures',
-                    'python-casacore>=2.1.2' if six.PY2 else 'python-casacore<=3.0.0',
+                    'python-casacore>=2.1.2; python_version <= "2.7"',
+                    'python-casacore<=3.0.0; python_version >= "3.0"',
                     'sharedarray @ git+https://gitlab.com/bennahugo/shared-array.git@master',
                     'matplotlib<3.0',
                     'cython',
                     'scipy',
                     'astro-tigger-lsm',
                     'six',
+                    'futures; python_version <= "2.7"',
                     'montblanc @ git+https://github.com/ska-sa/montblanc.git@ddfacet']
-    if six.PY2:
-        requirements.append('futures')
 
 setup(name='cubical',
       version=cubical.VERSION,
       long_description=long_description,
       long_description_content_type='text/markdown',
 
-      cmdclass={'build_ext': build_ext,
+      cmdclass={'build_ext': custom_build_ext,
                 'gocythonize': gocythonize},
 
       packages=['cubical',
                 'cubical.data_handler',
                 'cubical.machines',
                 'cubical.tools',
                 'cubical.kernels',
                 'cubical.plots',
                 'cubical.database',
                 'cubical.madmax'],
+      python_requires='<3.0' if six.PY2 else ">=3.0", #build a py2 or py3 specific wheel depending on environment (due to cython backend)
       install_requires=requirements,
       include_package_data=True,
       zip_safe=False,
-      ext_modules = extensions,
+      ext_modules = c_cpp_extensions,
       scripts = ['cubical/bin/print-cubical-stats'],
       entry_points={'console_scripts': ['gocubical = cubical.main:main']},
 )

From 1ba4fefaba3098918db40159d07b24b208c24b69 Mon Sep 17 00:00:00 2001
From: Jonathan Kenyon
Date: Thu, 30 May 2019 10:02:08 +0200
Subject: [PATCH 21/25] Fix dtype error on phase centre array.

---
 cubical/data_handler/TiggerSourceProvider.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cubical/data_handler/TiggerSourceProvider.py b/cubical/data_handler/TiggerSourceProvider.py
index 071bae2e..3a18e648 100644
--- a/cubical/data_handler/TiggerSourceProvider.py
+++ b/cubical/data_handler/TiggerSourceProvider.py
@@ -219,7 +219,7 @@ def updated_dimensions(self):
     def phase_centre(self, context):
         """ Sets the MB phase direction """
         radec = np.array([self._phase_center[...,-2],
-                          self._phase_center[...,-1]], np.float32)
+                          self._phase_center[...,-1]], np.float64)
         return radec
 
 def cluster_sources(sm, dde_tag):

From b213dcb1833f504862e9c0476c567eec3f26d4e4 Mon Sep 17 00:00:00 2001
From: Jonathan Kenyon
Date: Fri, 31 May 2019 11:23:45 +0200
Subject: [PATCH 22/25] Fix for python2 bombing when inserting BITFLAG column.

---
 cubical/flagging.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/cubical/flagging.py b/cubical/flagging.py
index 74a75a6e..0a9a5f08 100644
--- a/cubical/flagging.py
+++ b/cubical/flagging.py
@@ -148,6 +148,9 @@ def flagmask (self,name,create=False):
             If there are too many flagsets to create a new one.
         """
+        # Kludge for python2/3 interoperability.
+        name = str(name)
+
         # lookup flagbit, return if found
         if self.order is None:
            raise TypeError("MS does not contain a BITFLAG column. Please run the addbitflagcol" \
@@ -315,7 +318,7 @@ def flag_chisq (st, GD, basename, nddid):
         pylab.colorbar()
 
     # reshape flag array into time, ddid, channel
-    flag3 = flag.reshape((nt, nddid, nf / nddid))
+    flag3 = flag.reshape((nt, nddid, nf // nddid))
 
     # flag entire DDIDs with overdense flagging
     maxcount = nt*nf/nddid
@@ -338,4 +341,4 @@ def flag_chisq (st, GD, basename, nddid):
     if show_plots:
         pylab.show()
 
-    return flag3
\ No newline at end of file
+    return flag3

From 285c9ac2a8006e8e1e925fbf5e5c8da84bc38aa4 Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Fri, 31 May 2019 15:47:01 +0200
Subject: [PATCH 23/25] Depend on tagged release of montblanc

---
 setup.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 8adc0bf3..c99c878d 100644
--- a/setup.py
+++ b/setup.py
@@ -186,7 +186,8 @@ def run(self):
                     'astro-tigger-lsm',
                     'six',
                     'futures; python_version <= "2.7"',
-                    'montblanc @ git+https://github.com/ska-sa/montblanc.git@ddfacet']
+                    'montblanc @
+                    git+https://github.com/ska-sa/montblanc.git@0.6']

From 036d3854754afd473759d2f66da8fa34a51d894c Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Fri, 31 May 2019 15:56:33 +0200
Subject: [PATCH 24/25] typo

---
 setup.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/setup.py b/setup.py
index c99c878d..4db8e2c2 100644
--- a/setup.py
+++ b/setup.py
@@ -186,8 +186,7 @@ def run(self):
                     'astro-tigger-lsm',
                     'six',
                     'futures; python_version <= "2.7"',
-                    'montblanc @
-                    git+https://github.com/ska-sa/montblanc.git@0.6']
+                    'montblanc @git+https://github.com/ska-sa/montblanc.git@0.6']

From d0909b9b6424fb8efd4b995606e6b2e0180a5cd3 Mon Sep 17 00:00:00 2001
From: Benna Hugo
Date: Fri, 31 May 2019 18:21:27 +0200
Subject: [PATCH 25/25] Done and dusted py3 tested

---
 cubical/data_handler/TiggerSourceProvider.py | 2 +-
 setup.py                                     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cubical/data_handler/TiggerSourceProvider.py b/cubical/data_handler/TiggerSourceProvider.py
index 3a18e648..a6f1848e 100644
--- a/cubical/data_handler/TiggerSourceProvider.py
+++ b/cubical/data_handler/TiggerSourceProvider.py
@@ -219,7 +219,7 @@ def updated_dimensions(self):
     def phase_centre(self, context):
         """ Sets the MB phase direction """
         radec = np.array([self._phase_center[...,-2],
-                          self._phase_center[...,-1]], np.float64)
+                          self._phase_center[...,-1]], context.dtype)
         return radec
 
 def cluster_sources(sm, dde_tag):
diff --git a/setup.py b/setup.py
index 4db8e2c2..8c842b61 100644
--- a/setup.py
+++ b/setup.py
@@ -186,7 +186,7 @@ def run(self):
                     'astro-tigger-lsm',
                     'six',
                     'futures; python_version <= "2.7"',
-                    'montblanc @git+https://github.com/ska-sa/montblanc.git@0.6']
+                    'montblanc @git+https://github.com/ska-sa/montblanc.git@0.6.1']