diff --git a/FF_HEDM/GetPackages.sh b/FF_HEDM/GetPackages.sh index b76df0e4..a3bdb114 100755 --- a/FF_HEDM/GetPackages.sh +++ b/FF_HEDM/GetPackages.sh @@ -19,15 +19,15 @@ if [ ! -d ${dirThis}/NLOPT ]; then # NLOPT INSTALL make -j8 install fi -if [ ! -d ${dirThis}/NLOPTShared ]; then # NLOPT SHARED INSTALL - cd $dirThis - echo $(pwd) - rm -rf nlopt-2.4.2 - tar -xzf nlopt.tar.gz - cd nlopt-2.4.2 - ./configure --prefix=${dirThis}/NLOPTShared --enable-shared - make -j8 install -fi +# if [ ! -d ${dirThis}/NLOPTShared ]; then # NLOPT SHARED INSTALL +# cd $dirThis +# echo $(pwd) +# rm -rf nlopt-2.4.2 +# tar -xzf nlopt.tar.gz +# cd nlopt-2.4.2 +# ./configure --prefix=${dirThis}/NLOPTShared --enable-shared +# make -j8 install +# fi if [ ! -d ${dirThis}/swift ]; then # SWIFT cd $dirThis diff --git a/FF_HEDM/Makefile b/FF_HEDM/Makefile index 8134686c..f7f0bb0c 100644 --- a/FF_HEDM/Makefile +++ b/FF_HEDM/Makefile @@ -31,8 +31,7 @@ endif SRCDIR=src/ BINDIR=bin/ -all: help bindircheck calibrant imagemax \ - fittiltbclsdsample fitposorstrains peaksfitting \ +all: help bindircheck calibrant imagemax fitposorstrains peaksfitting fittiltbclsdsample \ mergeoverlaps calcradius findsaturatedpx genmediandark fitgrain tiff2ge \ mergerings fittiltx fitwedge hkls indexer bindata processgrains graintracking\ mapmultdetectors matchgrains detectormapper mergemultiplescans \ diff --git a/FF_HEDM/src/CalibrantOMP.c b/FF_HEDM/src/CalibrantOMP.c index 30c2cbc9..b437807f 100644 --- a/FF_HEDM/src/CalibrantOMP.c +++ b/FF_HEDM/src/CalibrantOMP.c @@ -405,11 +405,13 @@ void CalcFittedMean(int nIndices, int *NrEachIndexBin, int **Indices, double *Av Rmi = Rs[j] - Rstep/2; Rma = Rs[j] + Rstep/2; CalcPeakProfileParallel(IndicesThis,NrIndicesThis,idxThis,Average,Rmi,Rma,EtaMi,EtaMa,ybc,zbc,px,NrPixels, &RetVal); + // printf("%lf ",RetVal); PeakShape[j] = RetVal; if (RetVal != 0){ AllZero = 0; } } + // printf("\n"); for (j=0;j #include #include +#include #define deg2rad 0.0174532925199433 #define rad2deg 57.2957795130823 @@ -51,6 +52,18 @@ #define TestBit(A,k) (A[(k/32)] & (1 << (k%32))) #define MAXNOMEGARANGES 2000 +int +CalcDiffractionSpots(double Distance, + double ExcludePoleAngle, + double OmegaRanges[MAXNOMEGARANGES][2], + int NoOfOmegaRanges, + double **hkls, + int n_hkls, + double BoxSizes[MAXNOMEGARANGES][4], + int *nTspots, + double OrientMatr[3][3], + double **TheorSpots); + // For detector mapping! extern int BigDetSize; extern int *BigDetector; @@ -669,7 +682,7 @@ double FitErrorsPosT(double x[12],int nSpotsComp,double spotsYZOIn[nSpotsComp][9 } static inline -double FitErrorsOrientStrains(double x[9],int nSpotsComp,double spotsYZO[nSpotsComp][12],int nhkls,double hklsIn[nhkls][7], +double FitErrorsOrientStrains(double x[9],int nSpotsComp,double spotsYZO[nSpotsComp][9],int nhkls,double hklsIn[nhkls][7], double Lsd,double Wavelength,int nOmeRanges,double OmegaRanges[nOmeRanges][2], double BoxSizes[nOmeRanges][4],double MinEta,double wedge,double chi, double Pos[3]) { @@ -1488,7 +1501,7 @@ int main(int argc, char *argv[]) FILE *hklf = fopen(hklfn,"r"); if (hklf == NULL){ printf("Could not read the hkl file. Exiting.\n"); - return; + return 1; } fgets(aline,1000,hklf); int h,kt,l,Rnr, nhkls=0; @@ -1511,7 +1524,7 @@ int main(int argc, char *argv[]) } } fclose(hklf); - if (nOmeRanges != nBoxSizes){printf("Number of omega ranges and number of box sizes don't match. Exiting!\n");return;} + if (nOmeRanges != nBoxSizes){printf("Number of omega ranges and number of box sizes don't match. 
Exiting!\n");return 1;} double MargOme=0.01,MargPos=Rsample,MargPos2=Rsample/2,MargOme2=2,chi=0; int thisRowNr; # pragma omp parallel for num_threads(numProcs) private(thisRowNr) schedule(dynamic) diff --git a/FF_HEDM/src/ForwardSimulationCompressed.c b/FF_HEDM/src/ForwardSimulationCompressed.c index 1360241e..ca8482df 100644 --- a/FF_HEDM/src/ForwardSimulationCompressed.c +++ b/FF_HEDM/src/ForwardSimulationCompressed.c @@ -1363,14 +1363,13 @@ main(int argc, char *argv[]) // } // printf("\n"); OrientMat2Euler(OM,EulerThis); - Euler2OrientMat(EulerThis,OM); - // for (i=0;i<3;i++){ - // for (j=0;j<3;j++){ - // printf("%lf ",OM[i][j]); - // } - // printf("\n"); - // } - // printf("\n"); + double OMT[9]; + Euler2OrientMat(EulerThis,OMT); + for (i=0;i<3;i++){ + for (j=0;j<3;j++){ + OM[i][j] = OMT[i*3+j]; + } + } // Calculate the spots now. CalcDiffrSpots_Furnace(hklsOut,OM,Lsd,Wavelength,TheorSpots,&nTspots); // For each spot, calculate displacement, calculate tilt and wedge effect. diff --git a/FF_HEDM/src/GetMisorientation.c b/FF_HEDM/src/GetMisorientation.c index e1a7e1b9..9487816d 100644 --- a/FF_HEDM/src/GetMisorientation.c +++ b/FF_HEDM/src/GetMisorientation.c @@ -24,6 +24,9 @@ #define deg2rad 0.0174532925199433 #define rad2deg 57.2957795130823 #define EPS 1e-9 +static inline double sin_cos_to_angle (double s, double c){return (s >= 0.0) ? acos(c) : 2.0 * M_PI - acos(c);} +static inline double cosd(double x){return cos(deg2rad*x);} +static inline double sind(double x){return sin(deg2rad*x);} static inline void normalizeQuat(double quat[4]){ double norm = sqrt(quat[0]*quat[0]+quat[1]*quat[1]+quat[2]*quat[2]+quat[3]*quat[3]); diff --git a/FF_HEDM/src/IndexerOMP.c b/FF_HEDM/src/IndexerOMP.c index a31b3d9a..6fbb7b4e 100644 --- a/FF_HEDM/src/IndexerOMP.c +++ b/FF_HEDM/src/IndexerOMP.c @@ -24,6 +24,7 @@ #include #include #include +#include static void check (int test, const char * message, ...) @@ -62,6 +63,17 @@ check (int test, const char * message, ...) RealType *ObsSpotsLab; int n_spots = 0; +int +CalcDiffractionSpots(double Distance, + double ExcludePoleAngle, + double OmegaRanges[MAX_N_OMEGARANGES][2], + int NoOfOmegaRanges, + double **hkls, + int n_hkls, + double BoxSizes[MAX_N_OMEGARANGES][4], + int *nTspots, + double OrientMatr[3][3], + double **TheorSpots); // hkls to use double hkls[MAX_N_HKLS][7]; diff --git a/FF_HEDM/src/PeaksFittingOMPZarr.c b/FF_HEDM/src/PeaksFittingOMPZarr.c index 8e29b77f..1cd05660 100644 --- a/FF_HEDM/src/PeaksFittingOMPZarr.c +++ b/FF_HEDM/src/PeaksFittingOMPZarr.c @@ -551,11 +551,11 @@ check (int test, const char * message, ...) 
} } -void main(int argc, char *argv[]){ +int main(int argc, char *argv[]){ double start_time = omp_get_wtime(); if (argc < 5){ printf("Usage: %s DataFile blockNr nBlocks numProcs (optional)ResultFolder\n",argv[0]); - return; + return 1; } char *DataFN = argv[1]; int blockNr = atoi(argv[2]); @@ -594,6 +594,7 @@ void main(int argc, char *argv[]){ double BadPxIntensity = 0; char *resultFolder=NULL, dummy[2048]; int maskLoc = -1; + int doPeakFit = 1; while ((zip_stat_index(arch, count, 0, finfo)) == 0) { if (strstr(finfo->name,"exchange/data/.zarray")!=NULL){ s = calloc(finfo->size + 1, sizeof(char)); @@ -714,6 +715,18 @@ void main(int argc, char *argv[]){ free(arr); free(data); } + if (strstr(finfo->name,"measurement/process/scan_parameters/doPeakFit/0")!=NULL){ + arr = calloc(finfo->size + 1, sizeof(char)); + fd = zip_fopen_index(arch, count, 0); + zip_fread(fd, arr, finfo->size); + dsize = sizeof(int); + data = (char*)malloc((size_t)dsize); + dsize = blosc1_decompress(arr,data,dsize); + doPeakFit = *(int *)&data[0]; + printf("doPeakFit: %d\n",doPeakFit); + free(arr); + free(data); + } if (strstr(finfo->name,"analysis/process/analysis_parameters/ResultFolder/0")!=NULL){ arr = calloc(finfo->size + 1, sizeof(char)); fd = zip_fopen_index(arch, count, 0); @@ -1533,12 +1546,38 @@ void main(int argc, char *argv[]){ } } printf("nPeaks %d\n",nPeaks); - double retVal; - int rc = Fit2DPeaks(nPeaks,NrPixelsThisRegion,z,UsefulPixels,MaximaValues,MaximaPositions,IntegratedIntensity,IMAX,YCEN,ZCEN,Rads,Etass,Ycen,Zcen,Thresh,NrPx,OtherInfo,NrPixels,&retVal); + double retVal=0; + int rc = 0; + // If we don't want to fit, we can just compute weighted center of mass, but first put nPeaks =1; + // We need {IntegratedIntensity}, {IMAX}, {YCEN}, {ZCEN}, {Rads}, {Etass}, sigmaR (0), sigmaEta (0), {NrPx}, returnCode (0), retVal (0) + // \tBG\tSigmaGR\tSigmaLR\tSigmaGEta\tSigmaLEta\tMU\n" All of these will be 0 + // OtherInfo is set already, will be 0 and will populate the other values in the line above. + if (doPeakFit == 0){ + nPeaks = 1; + IMAX[0] = MaximaValues[0]; + NrPx[0] = NrPixelsThisRegion; + YCEN[0] = 0; + ZCEN[0] = 0; + IntegratedIntensity[0] = 0; + for (i=0;i= 0.0) ? acos(c) : 2.0 * M_PI - acos(c);} +inline +double GetMisOrientation(double quat1[4], double quat2[4], double axis[3], double *Angle,int SGNr); + static inline void OrientMat2Euler(double m[3][3],double Euler[3]) { @@ -125,6 +128,9 @@ FreeMemMatrix(double **mat,int nrows) free(mat); } +inline +void OrientMat2Quat(double OrientMat[9], double Quat[4]); + static inline int FindInternalAnglesTwins(int nrIDs, int *IDs, int *IDsPerGrain, @@ -241,11 +247,22 @@ QuatToOrientMat( OrientMat[8] = 1 - 2*(Q1_2+Q2_2); } +inline void +CalcStrainTensorFableBeaudoin(double LatCin[6],double LatticeParameterFit[6], + double Orient[3][3], double StrainTensorSample[3][3]); + +inline int +StrainTensorKenesei(int nspots,double **SpotsInfo, double Distance, double wavelength, + double StrainTensorSample[3][3], int **IDHash, + double *dspacings, int nRings, int startSpotMatrix, double **SpotMatrix, double *RetVal, + double StrainTensorInput[3][3]); + + int main(int argc, char *argv[]) { if (argc != 2){ printf("Usage: ProcessGrains ZarrZip\n"); - return; + return 1; } clock_t start, end; double diftotal; @@ -767,7 +784,7 @@ int main(int argc, char *argv[]) } if (retval == 0){ printf("Did not read correct hash table for IDs. 
Exiting\n"); - return; + return 1; } FinalMatrix[nGrains][0] = GrainIDThis; for (j=0;j<21;j++){ diff --git a/gui/ff_asym.py b/gui/ff_asym.py index 7de8d606..13ae77fa 100644 --- a/gui/ff_asym.py +++ b/gui/ff_asym.py @@ -1040,7 +1040,7 @@ def replot(): # Main function root = Tk.Tk() root.wm_title("FF display v0.2 Dt. 2024/02/10 hsharma@anl.gov") -figur = Figure(figsize=(17,8),dpi=100) +figur = Figure(figsize=(15,6),dpi=100) canvas = FigureCanvasTkAgg(figur,master=root) a = figur.add_subplot(121,aspect='equal') b = figur.add_subplot(122,aspect='equal') diff --git a/utils/AutoCalibrateZarr.py b/utils/AutoCalibrateZarr.py index 0d5eea62..f92c3ee4 100644 --- a/utils/AutoCalibrateZarr.py +++ b/utils/AutoCalibrateZarr.py @@ -36,10 +36,12 @@ def fileReader(f,dset): data[data<1] = 1 return np.mean(data,axis=0).astype(np.uint16) -def generateZip(resFol,pfn,dfn='',dloc='',nchunks=-1,preproc=-1,outf='ZipOut.txt',errf='ZipErr.txt'): +def generateZip(resFol,pfn,dfn='',darkfn='',dloc='',nchunks=-1,preproc=-1,outf='ZipOut.txt',errf='ZipErr.txt'): cmd = pytpath+' '+os.path.expanduser('~/opt/MIDAS/utils/ffGenerateZip.py')+' -resultFolder '+ resFol +' -paramFN ' + pfn if dfn!='': cmd+= ' -dataFN ' + dfn + if darkfn!='': + cmd+= ' -darkFN ' + darkfn if dloc!='': cmd+= ' -dataLoc ' + dloc if nchunks!=-1: @@ -55,6 +57,8 @@ def generateZip(resFol,pfn,dfn='',dloc='',nchunks=-1,preproc=-1,outf='ZipOut.txt parser = MyParser(description='''Automated Calibration for WAXS using continuous rings-like signal. This code takes either Zarr.Zip files or HDF5 files.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-dataFN', type=str, required=True, help='DataFileName.zip') +parser.add_argument('-darkFN', type=str, required=False, default='', help='If separate file consists of the dark, provide this parameter.') +parser.add_argument('-dataLoc', type=str, required=False, default='', help='If data is located in any location except /exchange/data in the hdf5 files, provide this.') parser.add_argument('-MakePlots', type=int, required=False, default=0, help='MakePlots: to draw, use 1.') parser.add_argument('-FirstRingNr', type=int, required=False, default=1, help='FirstRingNumber on data.') parser.add_argument('-EtaBinSize', type=float, required=False, default=5, help='EtaBinSize in degrees.') @@ -62,11 +66,15 @@ def generateZip(resFol,pfn,dfn='',dloc='',nchunks=-1,preproc=-1,outf='ZipOut.txt parser.add_argument('-Threshold', type=float, required=False, default=0, help='If you want to give a manual threshold, typically 500, otherwise, it will calculate automatically.') parser.add_argument('-StoppingStrain', type=float, required=False, default=0.00004, help='If refined pseudo-strain is below this value and all rings are "good", we would have converged.') parser.add_argument('-ImTransOpt', type=int, required=False, default=[0],nargs='*', help="If you want to do any transformations to the data: \n0: nothing, 1: flip LR, 2: flip UD, 3: transpose. 
Give as many as needed in the right order.") -parser.add_argument('-ConvertFile', type=int, required=False, default=0, help="If you want to generate the zarr zip file from an HDF: \n0: input is zarr zip file, 1: HDF5 input will be used to generarte a zarr zip file.") -parser.add_argument('-paramFN', type=str, required=False, default='', help="If you use convertFile = 1, you need to provide the parameter file consisting of all settings.") +parser.add_argument('-ConvertFile', type=int, required=False, default=0, help="If you want to generate the zarr zip file from an HDF: \n0: input is zarr zip file, 1: HDF5 input will be used to generarte a zarr zip file, 2: Binary GE-type file.") +parser.add_argument('-paramFN', type=str, required=False, default='', help="If you use convertFile = 1, you need to provide the parameter file consisting of all settings: SpaceGroup, SkipFrame, px, LatticeParameter, Wavelength.") +parser.add_argument('-LsdGuess', type=float, required=False, default=1000000, help="If you know a guess for the Lsd, it might be good to kickstart things.") args, unparsed = parser.parse_known_args() dataFN = args.dataFN +darkFN = args.darkFN +dataLoc = args.dataLoc +LsdGuess = args.LsdGuess convertFile = args.ConvertFile if convertFile == 1: @@ -74,7 +82,7 @@ def generateZip(resFol,pfn,dfn='',dloc='',nchunks=-1,preproc=-1,outf='ZipOut.txt if len(psFN) == 0: print("Provide the parameter file if you want to generate a zarr zip file.") sys.exit() - dataFN = generateZip('.',psFN,dfn=dataFN,nchunks=100,preproc=0) + dataFN = generateZip('.',psFN,dfn=dataFN,nchunks=100,preproc=0,darkfn=darkFN,dloc=dataLoc) dataFN = dataFN.split('/')[-1] @@ -118,9 +126,9 @@ def generateZip(resFol,pfn,dfn='',dloc='',nchunks=-1,preproc=-1,outf='ZipOut.txt dark = np.transpose(dark) mrr = 2000000 # maximum radius to simulate rings. This, combined with initialLsd should give enough coverage. -initialLsd = 1000000 # Only used for simulation, real Lsd can be anything. +initialLsd = LsdGuess # Only used for simulation, real Lsd can be anything. minArea = 300 # Minimum number of pixels that constitutes signal. For powder calibrants, 300 is typically used. Partial rings are okay, as long as they are at least 300 pixels big. - +maxW = 1000 # This is the maximum width around the ideal ring that will be used to compute peak shape, and subsequently peak parameters. It might create problems if too wide peak shapes are used. 
def redraw_figure(): plt.draw() @@ -138,7 +146,7 @@ def runMIDAS(fn): pf.write('Ext '+ext+'\n') for transOpt in imTransOpt: pf.write(f'ImTransOpt {transOpt}\n') - pf.write('Width 2000\n') + pf.write(f'Width {maxW}\n') pf.write('tolTilts 3\n') pf.write('tolBC 20\n') pf.write('tolLsd 15000\n') @@ -254,6 +262,10 @@ def runMIDAS(fn): data_corr[data_corr0] = 255 +if DrawPlots == 1: + plt.imshow(thresh) + plt.colorbar() + plt.show() labels,nlabels = measure.label(thresh,return_num=True) props = measure.regionprops(labels) bc = [] @@ -326,7 +338,10 @@ def runMIDAS(fn): if bestRowNr == -1: continue lsds.append(initialLsd*rads[i]/sim_rads[bestRowNr]) -initialLsd = np.median(lsds) +if LsdGuess == 1000000: + initialLsd = np.median(lsds) +else: + initialLsd = LsdGuess bc_new = bc_computed print("FN:",rawFN,"Beam Center guess: ",np.flip(bc_new),' Lsd guess: ',initialLsd) with open('ps.txt','w') as pf: diff --git a/utils/ffGenerateZip.py b/utils/ffGenerateZip.py index 4cca9a7d..d14d958c 100644 --- a/utils/ffGenerateZip.py +++ b/utils/ffGenerateZip.py @@ -190,10 +190,19 @@ def applyCorrectionNumba(img,dark,darkpreproc,doStd): if h5py.is_hdf5(InputFN): hf2 = h5py.File(InputFN,'r') nFrames,numZ,numY = hf2[dataLoc].shape - if darkLoc in hf2: - darkData = hf2[darkLoc][()] + print(hf2[dataLoc].shape,dataLoc) + if h5py.is_hdf5(darkFN) and darkFN != InputFN: + print(f"We are going to read dark from a different HDF. Please make sure that the dark file contains information in {dataLoc} dataset.") + hfDark = h5py.File(darkFN,'r') + darkData = hfDark[dataLoc][()] + print(darkData.shape,dataLoc) + hfDark.close() else: - darkData = np.zeros((10,numZ,numY)) + if darkLoc in hf2: + darkData = hf2[darkLoc][()] + else: + darkData = np.zeros((10,numZ,numY)) + print(darkData.shape) dark = exc.create_dataset('dark',shape=darkData.shape,dtype=np.uint16,chunks=(1,darkData.shape[1],darkData.shape[2]),compression=compressor) darkMean = np.mean(darkData[skipF:,:,:],axis=0).astype(np.uint16) if preProc!=-1: @@ -232,7 +241,7 @@ def applyCorrectionNumba(img,dark,darkpreproc,doStd): stOmeOver[:] = hf2[searchStr][()][0] searchStr = '/measurement/instrument/GSAS2_PVS/Pressure' if searchStr in hf2: - print('Pressure values were found!') + print(f'Pressure values were found, will enter {nFrames} values!') pressureDSet = sp_pro_meas.create_dataset(searchStr.split('/')[-1],dtype=np.double,shape=(nFrames,),chunks=(nFrames,),compressor=compressor) pressureDSet[:] = hf2[searchStr][()] searchStr = '/measurement/instrument/GSAS2_PVS/Temperature' @@ -281,7 +290,7 @@ def applyCorrectionNumba(img,dark,darkpreproc,doStd): if numFrameChunks == -1: numFrameChunks = nFrames numChunks = int(ceil(nFrames/numFrameChunks)) - fNr = re.search('\d{% s}' % pad, InputFN).group(0) + fNr = re.search(r'\d{% s}' % pad, InputFN).group(0) fNrOrig = fNr fNrLoc = int(fNr) for fileNrIter in range(numFilesPerScan): @@ -377,14 +386,15 @@ def applyCorrectionNumba(img,dark,darkpreproc,doStd): RingThreshArr = outArr else: RingThreshArr = np.vstack((RingThreshArr,outArr)) - searchStr = 'RingsToExclude' - if line.startswith(f'{searchStr} '): - outArr = np.array([float(x) for x in line.split()[1:3]]).astype(np.double) - outArr = outArr.reshape((1,2)) - if RingExcludeArr[0,0] == 0: - RingExcludeArr = outArr - else: - RingExcludeArr = np.vstack((RingExcludeArr,outArr)) + # searchStr = 'RingsToExclude' + # if line.startswith(f'{searchStr} '): + # outArr = np.array([float(x) for x in line.split()[1:3]]).astype(np.double) + # print(outArr) + # outArr = outArr.reshape((1,2)) + # if 
RingExcludeArr[0,0] == 0: + # RingExcludeArr = outArr + # else: + # RingExcludeArr = np.vstack((RingExcludeArr,outArr)) searchStr = 'HeadSize' if line.startswith(f'{searchStr} '): head = int(line.split()[1]) diff --git a/utils/integrator.py b/utils/integrator.py index fa38ac0a..9d286740 100755 --- a/utils/integrator.py +++ b/utils/integrator.py @@ -31,7 +31,7 @@ def error(self, message): self.print_help() sys.exit(2) -def generateZip(resFol,pfn,dfn='',dloc='',nchunks=-1,preproc=-1,outf='ZipOut.txt',errf='ZipErr.txt'): +def generateZip(resFol,pfn,dfn='',nchunks=-1,preproc=-1,outf='ZipOut.txt',errf='ZipErr.txt'): cmd = pytpath+' '+os.path.expanduser('~/opt/MIDAS/utils/ffGenerateZip.py')+' -resultFolder '+ resFol +' -paramFN ' + pfn if len(darkFN) != 0: cmd += f' -darkFN {darkFN}' @@ -82,6 +82,7 @@ def runOneFile(fileNr): parser.add_argument('-paramFN', type=str, required=True, help='Parameter file name.') parser.add_argument('-dataFN', type=str, required=True, default='', help='DataFileName for first file, this should have the full path if not in the current folder.') parser.add_argument('-darkFN', type=str, required=False, default='', help='DarkFileName, full path.') +parser.add_argument('-dataLoc', type=str, required=False, default='exchange/data', help='Data location.') parser.add_argument('-numFrameChunks', type=int, required=False, default=-1, help='Number of chunks to use when reading the data file if RAM is smaller than expanded data. -1 will disable.') parser.add_argument('-preProcThresh', type=int, required=False, default=-1, help='If want to save the dark corrected data, then put to whatever threshold wanted above dark. -1 will disable. 0 will just subtract dark. Negative values will be reset to 0.') parser.add_argument('-startFileNr', type=int, required=False, default=-1, help='Which fileNr to start from. Default is -1, which means that fileNr in dataFN is read.') @@ -101,6 +102,7 @@ def runOneFile(fileNr): convertFiles = args.convertFiles mapDetector = args.mapDetector nCPUs = args.nCPUs +dloc = args.dataLoc if len(resultDir) == 0 or resultDir == '.': resultDir = os.getcwd() @@ -113,7 +115,7 @@ def runOneFile(fileNr): startFileNrStr = str(startFileNr).zfill(6) if startFileNr == -1: - startFileNrStr = re.search('\d{% s}' % 6, InputFN) + startFileNrStr = re.search(r'\d{% s}' % 6, InputFN) print(f'Processing file number: {int(startFileNrStr.group(0))}') if not startFileNrStr: print("Could not find 6 padded fileNr. 
Exiting.") diff --git a/utils/mergeH5s.py b/utils/mergeH5s.py new file mode 100644 index 00000000..13a6e9a3 --- /dev/null +++ b/utils/mergeH5s.py @@ -0,0 +1,36 @@ +import h5py +import argparse +import sys + +class MyParser(argparse.ArgumentParser): + def error(self, message): + sys.stderr.write('error: %s\n' % message) + self.print_help() + sys.exit(2) + +parser = MyParser(description='''Merge two separate files containing data and dark into one, writing a single file.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter) +parser.add_argument('-dataFN', type=str, required=True, help='Data filename.') +parser.add_argument('-darkFN', type=str, required=True, help='Dark filename.') +parser.add_argument('-dataDataLoc', type=str, required=True, help='dataset location with information.') +parser.add_argument('-darkDataLoc', type=str, required=False, default='', help='dataset location in dark with information in case different from data file.') +args, unparsed = parser.parse_known_args() + +dataFN = args.dataFN +darkFN = args.darkFN +dataLoc = args.dataDataLoc +darkDataLoc = args.darkDataLoc + +if len(darkDataLoc) < 2: + darkDataLoc = dataLoc + +with h5py.File(darkFN,'r') as hf: + dark = hf[darkDataLoc][()] +with h5py.File(dataFN,'r') as hf: + data = hf[dataLoc][()] + +dataFNOut = f'{dataFN}.withdark.h5' +with h5py.File(dataFNOut,'w') as hf: + grp = hf.create_group('exchange') + grp.create_dataset('data',shape=data.shape,dtype=data.dtype,data=data) + grp.create_dataset('dark',shape=dark.shape,dtype=dark.dtype,data=dark) +