Single File Calibration¶
by Josh Dillon, Aaron Parsons, Tyler Cox, and Zachary Martinot, last updated May 6, 2024
This notebook is designed to infer as much information about the array as possible from a single file, pushing the calibration and RFI mitigation as far as they can go. Calibration includes redundant-baseline calibration, RFI-based calibration of delay slopes, model-based calibration of overall amplitudes, and a full per-frequency phase-gradient absolute calibration if abscal model files are available.
Here's a set of links to skip to particular figures and tables:
• Figure 1: RFI Flagging¶
• Figure 2: Plot of autocorrelations with classifications¶
• Figure 3: Summary of antenna classifications prior to calibration¶
• Figure 4: Redundant calibration of a single baseline group¶
• Figure 5: Absolute calibration of redcal degeneracies¶
• Figure 6: chi^2 per antenna across the array¶
• Figure 7: Summary of antenna classifications after redundant calibration¶
• Table 1: Complete summary of per antenna classifications¶
import time
tstart = time.time()
!hostname
bigmem2.rtp.pvt
import os
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
import h5py
import hdf5plugin # REQUIRED to have the compression plugins available
import numpy as np
from scipy import constants, interpolate
import copy
import glob
import re
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_rows', 1000)
from uvtools.plot import plot_antpos, plot_antclass
from hera_qm import ant_metrics, ant_class, xrfi
from hera_cal import io, utils, redcal, apply_cal, datacontainer, abscal
from hera_filters import dspec
from hera_notebook_templates.data import DATA_PATH as HNBT_DATA
from IPython.display import display, HTML
import linsolve
display(HTML("<style>.container { width:100% !important; }</style>"))
_ = np.seterr(all='ignore') # get rid of red warnings
%config InlineBackend.figure_format = 'retina'
# this enables better memory management on linux
import ctypes
def malloc_trim():
    try:
        ctypes.CDLL('libc.so.6').malloc_trim(0)
    except OSError:
        pass
Parse inputs and outputs¶
To use this notebook interactively, you will have to provide a sum file path here if the SUM_FILE environment variable is not set. All other parameters have reasonable default values.
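For example, when running interactively one might set the environment variable in a cell before the parsing code below (the path shown is purely illustrative):
# Illustrative only: any valid *.sum.uvh5 path will work; this one is hypothetical.
import os
os.environ["SUM_FILE"] = "/path/to/zen.2459866.33010.sum.uvh5"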
# figure out whether to save results
SAVE_RESULTS = os.environ.get("SAVE_RESULTS", "TRUE").upper() == "TRUE"
SAVE_OMNIVIS_FILE = os.environ.get("SAVE_OMNIVIS_FILE", "FALSE").upper() == "TRUE"
# get infile names
SUM_FILE = os.environ.get("SUM_FILE", None)
# SUM_FILE = '/lustre/aoc/projects/hera/h6c-analysis/IDR2/2459866/zen.2459866.33010.sum.uvh5' # If sum_file is not defined in the environment variables, define it here.
DIFF_FILE = SUM_FILE.replace('sum', 'diff')
# get outfilenames
AM_FILE = (SUM_FILE.replace('.uvh5', '.ant_metrics.hdf5') if SAVE_RESULTS else None)
ANTCLASS_FILE = (SUM_FILE.replace('.uvh5', '.ant_class.csv') if SAVE_RESULTS else None)
OMNICAL_FILE = (SUM_FILE.replace('.uvh5', '.omni.calfits') if SAVE_RESULTS else None)
OMNIVIS_FILE = (SUM_FILE.replace('.uvh5', '.omni_vis.uvh5') if SAVE_RESULTS else None)
for fname in ['SUM_FILE', 'DIFF_FILE', 'AM_FILE', 'ANTCLASS_FILE', 'OMNICAL_FILE', 'OMNIVIS_FILE', 'SAVE_RESULTS', 'SAVE_OMNIVIS_FILE']:
print(f"{fname} = '{eval(fname)}'")
SUM_FILE = '/mnt/sn1/data1/2460455/zen.2460455.34458.sum.uvh5'
DIFF_FILE = '/mnt/sn1/data1/2460455/zen.2460455.34458.diff.uvh5'
AM_FILE = '/mnt/sn1/data1/2460455/zen.2460455.34458.sum.ant_metrics.hdf5'
ANTCLASS_FILE = '/mnt/sn1/data1/2460455/zen.2460455.34458.sum.ant_class.csv'
OMNICAL_FILE = '/mnt/sn1/data1/2460455/zen.2460455.34458.sum.omni.calfits'
OMNIVIS_FILE = '/mnt/sn1/data1/2460455/zen.2460455.34458.sum.omni_vis.uvh5'
SAVE_RESULTS = 'True'
SAVE_OMNIVIS_FILE = 'False'
Parse settings¶
Load settings relating to the operation of the notebook, then print what was loaded (or default).
# parse plotting settings
PLOT = os.environ.get("PLOT", "TRUE").upper() == "TRUE"
if PLOT:
    %matplotlib inline
# parse omnical settings
OC_MAX_DIMS = int(os.environ.get("OC_MAX_DIMS", 4))
OC_MIN_DIM_SIZE = int(os.environ.get("OC_MIN_DIM_SIZE", 8))
OC_SKIP_OUTRIGGERS = os.environ.get("OC_SKIP_OUTRIGGERS", "FALSE").upper() == "TRUE"
OC_MIN_BL_LEN = float(os.environ.get("OC_MIN_BL_LEN", 1))
OC_MAX_BL_LEN = float(os.environ.get("OC_MAX_BL_LEN", 1e100))
OC_MAXITER = int(os.environ.get("OC_MAXITER", 50))
OC_MAX_RERUN = int(os.environ.get("OC_MAX_RERUN", 4))
OC_RERUN_MAXITER = int(os.environ.get("OC_RERUN_MAXITER", 25))
OC_MAX_CHISQ_FLAGGING_DYNAMIC_RANGE = float(os.environ.get("OC_MAX_CHISQ_FLAGGING_DYNAMIC_RANGE", 1))
OC_USE_PRIOR_SOL = os.environ.get("OC_USE_PRIOR_SOL", "FALSE").upper() == "TRUE"
OC_PRIOR_SOL_FLAG_THRESH = float(os.environ.get("OC_PRIOR_SOL_FLAG_THRESH", .95))
OC_USE_GPU = os.environ.get("OC_USE_GPU", "FALSE").upper() == "TRUE"
# parse RFI settings
RFI_DPSS_HALFWIDTH = float(os.environ.get("RFI_DPSS_HALFWIDTH", 300e-9))
RFI_NSIG = float(os.environ.get("RFI_NSIG", 4))
# parse abscal settings
ABSCAL_MODEL_FILES_GLOB = os.environ.get("ABSCAL_MODEL_FILES_GLOB", None)
ABSCAL_MIN_BL_LEN = float(os.environ.get("ABSCAL_MIN_BL_LEN", 1.0))
ABSCAL_MAX_BL_LEN = float(os.environ.get("ABSCAL_MAX_BL_LEN", 140.0))
# print settings
for setting in ['PLOT', 'SAVE_RESULTS', 'OC_MAX_DIMS', 'OC_MIN_DIM_SIZE', 'OC_SKIP_OUTRIGGERS',
'OC_MIN_BL_LEN', 'OC_MAX_BL_LEN', 'OC_MAXITER', 'OC_MAX_RERUN', 'OC_RERUN_MAXITER',
'OC_MAX_CHISQ_FLAGGING_DYNAMIC_RANGE', 'OC_USE_PRIOR_SOL', 'OC_PRIOR_SOL_FLAG_THRESH',
'OC_USE_GPU', 'RFI_DPSS_HALFWIDTH', 'RFI_NSIG', 'ABSCAL_MODEL_FILES_GLOB',
'ABSCAL_MIN_BL_LEN', 'ABSCAL_MAX_BL_LEN']:
    print(f'{setting} = {eval(setting)}')
PLOT = True
SAVE_RESULTS = True
OC_MAX_DIMS = 4
OC_MIN_DIM_SIZE = 8
OC_SKIP_OUTRIGGERS = True
OC_MIN_BL_LEN = 1.0
OC_MAX_BL_LEN = 140.0
OC_MAXITER = 40
OC_MAX_RERUN = 5
OC_RERUN_MAXITER = 40
OC_MAX_CHISQ_FLAGGING_DYNAMIC_RANGE = 1.5
OC_USE_PRIOR_SOL = True
OC_PRIOR_SOL_FLAG_THRESH = 0.95
OC_USE_GPU = False
RFI_DPSS_HALFWIDTH = 3e-07
RFI_NSIG = 4.0
ABSCAL_MODEL_FILES_GLOB = None
ABSCAL_MIN_BL_LEN = 1.0
ABSCAL_MAX_BL_LEN = 140.0
Parse bounds¶
Load settings related to classifying antennas as good, suspect, or bad, then print what was loaded (or default).
# ant_metrics bounds for low correlation / dead antennas
am_corr_bad = (0, float(os.environ.get("AM_CORR_BAD", 0.3)))
am_corr_suspect = (float(os.environ.get("AM_CORR_BAD", 0.3)), float(os.environ.get("AM_CORR_SUSPECT", 0.5)))
# ant_metrics bounds for cross-polarized antennas
am_xpol_bad = (-1, float(os.environ.get("AM_XPOL_BAD", -0.1)))
am_xpol_suspect = (float(os.environ.get("AM_XPOL_BAD", -0.1)), float(os.environ.get("AM_XPOL_SUSPECT", 0)))
# bounds on solar altitude (in degrees)
good_solar_altitude = (-90, float(os.environ.get("SUSPECT_SOLAR_ALTITUDE", 0)))
suspect_solar_altitude = (float(os.environ.get("SUSPECT_SOLAR_ALTITUDE", 0)), 90)
# bounds on zeros in spectra
good_zeros_per_eo_spectrum = (0, int(os.environ.get("MAX_ZEROS_PER_EO_SPEC_GOOD", 2)))
suspect_zeros_per_eo_spectrum = (0, int(os.environ.get("MAX_ZEROS_PER_EO_SPEC_SUSPECT", 8)))
# bounds on autocorrelation power
auto_power_good = (float(os.environ.get("AUTO_POWER_GOOD_LOW", 5)), float(os.environ.get("AUTO_POWER_GOOD_HIGH", 30)))
auto_power_suspect = (float(os.environ.get("AUTO_POWER_SUSPECT_LOW", 1)), float(os.environ.get("AUTO_POWER_SUSPECT_HIGH", 60)))
# bounds on autocorrelation slope
auto_slope_good = (float(os.environ.get("AUTO_SLOPE_GOOD_LOW", -0.4)), float(os.environ.get("AUTO_SLOPE_GOOD_HIGH", 0.4)))
auto_slope_suspect = (float(os.environ.get("AUTO_SLOPE_SUSPECT_LOW", -0.6)), float(os.environ.get("AUTO_SLOPE_SUSPECT_HIGH", 0.6)))
# bounds on autocorrelation RFI
auto_rfi_good = (0, float(os.environ.get("AUTO_RFI_GOOD", 1.5)))
auto_rfi_suspect = (0, float(os.environ.get("AUTO_RFI_SUSPECT", 2)))
# bounds on autocorrelation shape
auto_shape_good = (0, float(os.environ.get("AUTO_SHAPE_GOOD", 0.1)))
auto_shape_suspect = (0, float(os.environ.get("AUTO_SHAPE_SUSPECT", 0.2)))
# bound on per-xengine non-noiselike power in diff
bad_xengine_zcut = float(os.environ.get("BAD_XENGINE_ZCUT", 10.0))
# bounds on chi^2 per antenna in omnical
oc_cspa_good = (0, float(os.environ.get("OC_CSPA_GOOD", 2)))
oc_cspa_suspect = (0, float(os.environ.get("OC_CSPA_SUSPECT", 3)))
# print bounds
for bound in ['am_corr_bad', 'am_corr_suspect', 'am_xpol_bad', 'am_xpol_suspect',
'good_solar_altitude', 'suspect_solar_altitude',
'good_zeros_per_eo_spectrum', 'suspect_zeros_per_eo_spectrum',
'auto_power_good', 'auto_power_suspect', 'auto_slope_good', 'auto_slope_suspect',
'auto_rfi_good', 'auto_rfi_suspect', 'auto_shape_good', 'auto_shape_suspect',
'bad_xengine_zcut', 'oc_cspa_good', 'oc_cspa_suspect']:
    print(f'{bound} = {eval(bound)}')
am_corr_bad = (0, 0.2)
am_corr_suspect = (0.2, 0.4)
am_xpol_bad = (-1, -0.1)
am_xpol_suspect = (-0.1, 0.0)
good_solar_altitude = (-90, 0.0)
suspect_solar_altitude = (0.0, 90)
good_zeros_per_eo_spectrum = (0, 2)
suspect_zeros_per_eo_spectrum = (0, 8)
auto_power_good = (5.0, 30.0)
auto_power_suspect = (1.0, 60.0)
auto_slope_good = (-0.4, 0.4)
auto_slope_suspect = (-0.6, 0.6)
auto_rfi_good = (0, 1.5)
auto_rfi_suspect = (0, 2.0)
auto_shape_good = (0, 0.1)
auto_shape_suspect = (0, 0.2)
bad_xengine_zcut = 10.0
oc_cspa_good = (0, 2.0)
oc_cspa_suspect = (0, 3.0)
Load sum and diff data¶
read_start = time.time()
hd = io.HERADataFastReader(SUM_FILE)
data, _, _ = hd.read(read_flags=False, read_nsamples=False)
hd_diff = io.HERADataFastReader(DIFF_FILE)
diff_data, _, _ = hd_diff.read(read_flags=False, read_nsamples=False, dtype=np.complex64)
print(f'Finished loading data in {(time.time() - read_start) / 60:.2f} minutes.')
Finished loading data in 0.52 minutes.
ants = sorted(set([ant for bl in hd.bls for ant in utils.split_bl(bl)]))
auto_bls = [bl for bl in data if (bl[0] == bl[1]) and (utils.split_pol(bl[2])[0] == utils.split_pol(bl[2])[1])]
antpols = sorted(set([ant[1] for ant in ants]))
# print basic information about the file
print(f'File: {SUM_FILE}')
print(f'JDs: {hd.times} ({np.median(np.diff(hd.times)) * 24 * 3600:.5f} s integrations)')
print(f'LSTS: {hd.lsts * 12 / np.pi } hours')
print(f'Frequencies: {len(hd.freqs)} {np.median(np.diff(hd.freqs)) / 1e6:.5f} MHz channels from {hd.freqs[0] / 1e6:.5f} to {hd.freqs[-1] / 1e6:.5f} MHz')
print(f'Antennas: {len(hd.data_ants)}')
print(f'Polarizations: {hd.pols}')
File: /mnt/sn1/data1/2460455/zen.2460455.34458.sum.uvh5
JDs: [2460455.34452446 2460455.34463631] (9.66368 s integrations)
LSTS: [13.89160081 13.89429252] hours
Frequencies: 1536 0.12207 MHz channels from 46.92078 to 234.29871 MHz
Antennas: 252
Polarizations: ['nn', 'ee', 'ne', 'en']
Classify good, suspect, and bad antpols¶
Run ant_metrics¶
This classifies antennas as cross-polarized, low-correlation, or dead. Such antennas are excluded from any calibration.
am = ant_metrics.AntennaMetrics(SUM_FILE, DIFF_FILE, sum_data=data, diff_data=diff_data)
am.iterative_antenna_metrics_and_flagging(crossCut=am_xpol_bad[1], deadCut=am_corr_bad[1])
am.all_metrics = {} # this saves time and disk by getting rid of per-iteration information we never use
if SAVE_RESULTS:
    am.save_antenna_metrics(AM_FILE, overwrite=True)
# Turn ant metrics into classifications
totally_dead_ants = [ant for ant, i in am.xants.items() if i == -1]
am_totally_dead = ant_class.AntennaClassification(good=[ant for ant in ants if ant not in totally_dead_ants], bad=totally_dead_ants)
am_corr = ant_class.antenna_bounds_checker(am.final_metrics['corr'], bad=[am_corr_bad], suspect=[am_corr_suspect], good=[(0, 1)])
am_xpol = ant_class.antenna_bounds_checker(am.final_metrics['corrXPol'], bad=[am_xpol_bad], suspect=[am_xpol_suspect], good=[(-1, 1)])
ant_metrics_class = am_totally_dead + am_corr + am_xpol
if np.all([ant_metrics_class[utils.split_bl(bl)[0]] == 'bad' for bl in auto_bls]):
    raise ValueError('All antennas are flagged for ant_metrics.')
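Note that the + operator above merges AntennaClassification objects. The rule assumed throughout this notebook (and sketched below with a plain dictionary stand-in, not the actual hera_qm implementation) is that an antpol is bad if it is bad in any input, good only if good in all inputs, and suspect otherwise.
# Illustrative stand-in for combining per-antpol classifications (assumed semantics, not hera_qm code)
def combine_classifications(*classification_dicts):
    '''Each input maps antpol -> 'good' | 'suspect' | 'bad'; bad trumps suspect, which trumps good.'''
    combined = {}
    for cls in classification_dicts:
        for ant, quality in cls.items():
            if ant not in combined:
                combined[ant] = quality
            elif 'bad' in (quality, combined[ant]):
                combined[ant] = 'bad'
            elif 'suspect' in (quality, combined[ant]):
                combined[ant] = 'suspect'
    return combined

# e.g. combine_classifications({(4, 'Jee'): 'good'}, {(4, 'Jee'): 'suspect'}) yields {(4, 'Jee'): 'suspect'}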
Mark sun-up (or high solar altitude) data as suspect¶
min_sun_alt = np.min(utils.get_sun_alt(hd.times))
solar_class = ant_class.antenna_bounds_checker({ant: min_sun_alt for ant in ants}, good=[good_solar_altitude], suspect=[suspect_solar_altitude])
Classify antennas responsible for 0s in visibilities as bad:¶
This classifier looks for X-engine failure or packet loss specific to an antenna, which causes the even visibilities, the odd ones, or both to be all zeros.
zeros_class = ant_class.even_odd_zeros_checker(data, diff_data, good=good_zeros_per_eo_spectrum, suspect=suspect_zeros_per_eo_spectrum)
if np.all([zeros_class[utils.split_bl(bl)[0]] == 'bad' for bl in auto_bls]):
    raise ValueError('All antennas are flagged for too many even/odd zeros.')
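For context, the even and odd interleaved visibilities can be recovered from the sum and diff data, and the classifier counts channels that are exactly zero in each interleave. A minimal sketch of that reconstruction (illustrative, not the hera_qm implementation):
# Illustrative only: recover even/odd interleaves for one baseline and count exactly-zero channels.
def max_even_odd_zeros(sum_waterfall, diff_waterfall):
    '''sum/diff waterfalls: complex arrays of shape (Ntimes, Nfreqs) for a single baseline.'''
    even = (sum_waterfall + diff_waterfall) / 2
    odd = (sum_waterfall - diff_waterfall) / 2
    # worst case over integrations: how many channels are exactly 0 in an even or odd spectrum
    return max(np.sum(even == 0, axis=1).max(), np.sum(odd == 0, axis=1).max())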
Examine and classify autocorrelation power and slope¶
These classifiers look for antennas with too high or too low a power, or with too steep a slope.
auto_power_class = ant_class.auto_power_checker(data, good=auto_power_good, suspect=auto_power_suspect)
auto_slope_class = ant_class.auto_slope_checker(data, good=auto_slope_good, suspect=auto_slope_suspect, edge_cut=100, filt_size=17)
if np.all([(auto_power_class + auto_slope_class)[utils.split_bl(bl)[0]] == 'bad' for bl in auto_bls]):
    raise ValueError('All antennas are flagged for bad autocorrelation power/slope.')
overall_class = auto_power_class + auto_slope_class + zeros_class + ant_metrics_class + solar_class
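As a rough illustration of the kind of statistic the slope classifier above bounds, one could smooth a time-averaged autocorrelation, normalize it, and fit a line across the band. The sketch below is an assumed, simplified form rather than the hera_qm implementation; edge_cut and filt_size mirror the arguments passed above.
# Illustrative only: a simplified stand-in for the auto slope statistic bounded by auto_slope_good/suspect.
from scipy.signal import medfilt

def rough_auto_slope(avg_auto_spectrum, edge_cut=100, filt_size=17):
    '''avg_auto_spectrum: real 1D array, a time-averaged autocorrelation. Returns a dimensionless slope.'''
    smoothed = medfilt(np.abs(avg_auto_spectrum), kernel_size=filt_size)[edge_cut:-edge_cut]
    normalized = smoothed / np.mean(smoothed)
    x = np.linspace(-0.5, 0.5, len(normalized))  # so the fitted slope is the fractional change across the band
    slope, _ = np.polyfit(x, normalized, 1)
    return slope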
Examine and classify autocorrelation excess RFI and shape, finding consensus RFI mask along the way¶
This classifier iteratively identifies antennas with excess RFI (characterized by the RMS of DPSS-filtered autocorrelations after RFI flagging) and with bad shapes, as determined by discrepancy with the mean shape of the good, normalized autocorrelations. Along the way, it iteratively builds up a consensus, array-wide RFI mask.
def auto_bl_zscores(data, flag_array, cache={}):
    '''This function computes z-score arrays for each delay-filtered autocorrelation, normalized by the expected noise.
    Flagged times/channels for the whole array are given 0 weight in filtering and are np.nan in the z-score.'''
    zscores = {}
    for bl in auto_bls:
        wgts = np.array(np.logical_not(flag_array), dtype=np.float64)
        model, _, _ = dspec.fourier_filter(hd.freqs, data[bl], wgts, filter_centers=[0], filter_half_widths=[RFI_DPSS_HALFWIDTH], mode='dpss_solve',
                                           suppression_factors=[1e-9], eigenval_cutoff=[1e-9], cache=cache)
        res = data[bl] - model
        int_time = 24 * 3600 * np.median(np.diff(data.times))
        chan_res = np.median(np.diff(data.freqs))
        int_count = int(int_time * chan_res)
        sigma = np.abs(model) / np.sqrt(int_count / 2)
        zscores[bl] = res / sigma
        zscores[bl][flag_array] = np.nan
    return zscores
def rfi_from_avg_autos(data, auto_bls_to_use, prior_flags=None, nsig=RFI_NSIG):
    '''Average together all baselines in auto_bls_to_use, then find an RFI mask by looking for outliers after DPSS filtering.'''
    # Compute int_count for all unflagged autocorrelations averaged together
    int_time = 24 * 3600 * np.median(np.diff(data.times_by_bl[auto_bls[0][0:2]]))
    chan_res = np.median(np.diff(data.freqs))
    int_count = int(int_time * chan_res) * len(auto_bls_to_use)
    avg_auto = {(-1, -1, 'ee'): np.mean([data[bl] for bl in auto_bls_to_use], axis=0)}
    # Flag RFI first with channel differences and then with DPSS
    antenna_flags, _ = xrfi.flag_autos(avg_auto, int_count=int_count, nsig=(RFI_NSIG * 5))
    if prior_flags is not None:
        antenna_flags[(-1, -1, 'ee')] = prior_flags
    _, rfi_flags = xrfi.flag_autos(avg_auto, int_count=int_count, flag_method='dpss_flagger',
                                   flags=antenna_flags, freqs=data.freqs, filter_centers=[0],
                                   filter_half_widths=[RFI_DPSS_HALFWIDTH], eigenval_cutoff=[1e-9], nsig=nsig)
    return rfi_flags
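For reference, the z-score computed in auto_bl_zscores above can be written as
$$ z(t, \nu) = \frac{V(t, \nu) - \hat{V}(t, \nu)}{|\hat{V}(t, \nu)| / \sqrt{N/2}}, \qquad N = \Delta t \, \Delta \nu, $$
where $V$ is a raw autocorrelation, $\hat{V}$ is its DPSS-filtered model, $\Delta t$ is the integration time in seconds, and $\Delta \nu$ is the channel width in Hz. The per-antenna excess-RFI metric classified against auto_rfi_good and auto_rfi_suspect in the loop below is the RMS of $z$ over unflagged times and channels.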
# Find starting set of array flags
antenna_flags, array_flags = xrfi.flag_autos(data, flag_method="channel_diff_flagger", nsig=RFI_NSIG * 5,
                                             antenna_class=overall_class, flag_broadcast_thresh=.5)
for key in antenna_flags:
    antenna_flags[key] = array_flags
cache = {}
_, array_flags = xrfi.flag_autos(data, freqs=data.freqs, flag_method="dpss_flagger",
                                 nsig=RFI_NSIG, antenna_class=overall_class,
                                 filter_centers=[0], filter_half_widths=[RFI_DPSS_HALFWIDTH],
                                 eigenval_cutoff=[1e-9], flags=antenna_flags, mode='dpss_matrix',
                                 cache=cache, flag_broadcast_thresh=.5)
# Iteratively develop RFI mask, excess RFI classification, and autocorrelation shape classification
stage = 1
rfi_flags = np.array(array_flags)
prior_end_states = set()
while True:
    # compute DPSS-filtered z-scores with current array-wide RFI mask
    zscores = auto_bl_zscores(data, rfi_flags)
    rms = {bl: np.nanmean(zscores[bl]**2)**.5 if np.any(np.isfinite(zscores[bl])) else np.inf for bl in zscores}

    # figure out which autos to use for finding new set of flags
    candidate_autos = [bl for bl in auto_bls if overall_class[utils.split_bl(bl)[0]] != 'bad']
    if stage == 1:
        # use best half of the unflagged antennas
        med_rms = np.nanmedian([rms[bl] for bl in candidate_autos])
        autos_to_use = [bl for bl in candidate_autos if rms[bl] <= med_rms]
    elif stage == 2:
        # use all unflagged antennas which are auto RFI good, or the best half, whichever is larger
        med_rms = np.nanmedian([rms[bl] for bl in candidate_autos])
        best_half_autos = [bl for bl in candidate_autos if rms[bl] <= med_rms]
        good_autos = [bl for bl in candidate_autos if (overall_class[utils.split_bl(bl)[0]] != 'bad')
                      and (auto_rfi_class[utils.split_bl(bl)[0]] == 'good')]
        autos_to_use = (best_half_autos if len(best_half_autos) > len(good_autos) else good_autos)
    elif stage == 3:
        # use all unflagged antennas which are auto RFI good or suspect
        autos_to_use = [bl for bl in candidate_autos if (overall_class[utils.split_bl(bl)[0]] != 'bad')]

    # compute new RFI flags
    rfi_flags = rfi_from_avg_autos(data, autos_to_use)

    # perform auto shape and RFI classification
    overall_class = auto_power_class + auto_slope_class + zeros_class + ant_metrics_class + solar_class
    auto_rfi_class = ant_class.antenna_bounds_checker(rms, good=auto_rfi_good, suspect=auto_rfi_suspect, bad=(0, np.inf))
    overall_class += auto_rfi_class
    auto_shape_class = ant_class.auto_shape_checker(data, good=auto_shape_good, suspect=auto_shape_suspect,
                                                    flag_spectrum=np.sum(rfi_flags, axis=0).astype(bool),
                                                    antenna_class=overall_class)
    overall_class += auto_shape_class

    # check for convergence by seeing whether we've previously gotten to this number of flagged antennas and channels
    if stage == 3:
        if (len(overall_class.bad_ants), np.sum(rfi_flags)) in prior_end_states:
            break
        prior_end_states.add((len(overall_class.bad_ants), np.sum(rfi_flags)))
    else:
        stage += 1
auto_class = auto_power_class + auto_slope_class + auto_rfi_class + auto_shape_class
if np.all([auto_class[utils.split_bl(bl)[0]] == 'bad' for bl in auto_bls]):
    raise ValueError('All antennas are flagged for bad autos power/slope/rfi/shape.')
def rfi_plot(cls, flags=rfi_flags):
    avg_auto = {(-1, -1, 'ee'): np.mean([data[bl] for bl in auto_bls if not cls[utils.split_bl(bl)[0]] == 'bad'], axis=0)}
    plt.figure(figsize=(12, 5), dpi=100)
    plt.semilogy(hd.freqs / 1e6, np.where(flags, np.nan, avg_auto[(-1, -1, 'ee')])[0], label='Average Good or Suspect Autocorrelation', zorder=100)
    plt.semilogy(hd.freqs / 1e6, avg_auto[(-1, -1, 'ee')][0], 'r', lw=.5, label=f'{np.sum(flags[0])} Channels Flagged for RFI')
    plt.legend()
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('Uncalibrated Autocorrelation')
    plt.tight_layout()
Figure 1: RFI Flagging¶
This figure shows RFI identified using the average of all autocorrelations (excluding bad antennas) for the first integration in the file.
if PLOT: rfi_plot(overall_class)
def autocorr_plot(cls):
    fig, axes = plt.subplots(1, 2, figsize=(14, 5), dpi=100, sharey=True, gridspec_kw={'wspace': 0})
    labels = []
    colors = ['darkgreen', 'goldenrod', 'maroon']
    for ax, pol in zip(axes, antpols):
        for ant in cls.ants:
            if ant[1] == pol:
                color = colors[cls.quality_classes.index(cls[ant])]
                ax.semilogy(np.mean(data[utils.join_bl(ant, ant)], axis=0), color=color, lw=.5)
        ax.set_xlabel('Channel', fontsize=12)
        ax.set_title(f'{utils.join_pol(pol, pol)}-Polarized Autos')
    axes[0].set_ylabel('Raw Autocorrelation', fontsize=12)
    axes[1].legend([matplotlib.lines.Line2D([0], [0], color=color) for color in colors],
                   [cl.capitalize() for cl in cls.quality_classes], ncol=1, fontsize=12, loc='upper right', framealpha=1)
    plt.tight_layout()
Figure 2: Plot of autocorrelations with classifications¶
This figure shows all of the autocorrelations in the array, split by polarization. Antennas are classified as good, suspect, or bad based on their autocorrelations' power, slope, RFI occupancy, and shape.
if PLOT: autocorr_plot(auto_class)