NEXRAD Updates #1251

Merged
8 commits merged on Dec 22, 2019
2 changes: 1 addition & 1 deletion .appveyor.yml
@@ -21,7 +21,7 @@ install:
{ $env:CONDA_PATH="$($env:CONDA_PATH)37" }
- ps: if($env:PLATFORM -eq 'x64')
{ $env:CONDA_PATH="$($env:CONDA_PATH)-x64" }
- ps: $env:path="$($env:CONDA_PATH);$($env:CONDA_PATH)\Scripts;$($env:CONDA_PATH)\Library\bin;C:\cygwin\bin;$($env:PATH)"
- ps: $env:path="$($env:CONDA_PATH);$($env:CONDA_PATH)\Scripts;$($env:CONDA_PATH)\Library\bin;$($env:PATH)"
- cmd: conda config --set always_yes yes --set changeps1 no
- cmd: conda update -q conda
# Useful for debugging any issues with conda
3 changes: 0 additions & 3 deletions docs/Makefile
@@ -4,7 +4,6 @@
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXAUTOGEN = sphinx-autogen
SPHINXPROJ = MetPy
SOURCEDIR = .
BUILDDIR = build
@@ -28,6 +27,4 @@ overridecheck:
# Manual autogen needed so we can specify the -i option so that imported names
# are included in generation
%: Makefile
echo Running sphinx-autogen
@$(SPHINXAUTOGEN) -i -t $(SOURCEDIR)/_templates -o $(SOURCEDIR)/api/generated $(SOURCEDIR)/api/*.rst
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
6 changes: 5 additions & 1 deletion docs/conf.py
@@ -25,7 +25,7 @@
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.8'
needs_sphinx = '2.1'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -82,6 +82,10 @@
# The suffix of source filenames.
source_suffix = ['.rst', '.md']

# Controlling automatically generating summary tables in the docs
autosummary_generate = True
autosummary_imported_members = True

# The encoding of source files.
# source_encoding = 'utf-8-sig'

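The conf.py change above is what lets the Makefile drop its manual sphinx-autogen step: with autosummary generation enabled, sphinx-build creates the API stub pages itself, and autosummary_imported_members (new in Sphinx 2.1, hence the needs_sphinx bump) covers what the old `-i` flag provided. A minimal sketch of how these options sit in a conf.py; the extension list here is an assumption, not taken from this diff:

```python
# Hypothetical conf.py excerpt -- extension names other than autosummary are
# assumptions about what a project like this typically enables.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
]

# Let sphinx-build generate the autosummary stub pages itself, removing the
# need for a separate `sphinx-autogen` call in the Makefile.
autosummary_generate = True

# Added in Sphinx 2.1: also document names imported into a module, which is
# what the old `sphinx-autogen -i` option provided.
autosummary_imported_members = True
```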
2 changes: 1 addition & 1 deletion docs/infrastructureguide.rst
@@ -31,7 +31,7 @@ This will also create a new stable set of documentation.
Documentation
-------------

MetPy's documentation is built using sphinx >= 1.8. API documentation is automatically
MetPy's documentation is built using sphinx >= 2.1. API documentation is automatically
generated from docstrings, written using the
`NumPy docstring standard <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_.
There are also example scripts in the ``examples`` directory. Using the ``sphinx-gallery``
2 changes: 1 addition & 1 deletion setup.cfg
@@ -52,7 +52,7 @@ where = src

[options.extras_require]
dev = ipython[all]>=3.1
doc = sphinx>=1.8; sphinx-gallery>=0.4; doc8; m2r; netCDF4
doc = sphinx>=2.1; sphinx-gallery>=0.4; doc8; m2r; netCDF4
examples = cartopy>=0.13.1; matplotlib>=2.2.0; pyproj>=1.9.4,!=2.0.0
test = pytest>=2.4; pytest-mpl; pytest-flake8; cartopy>=0.16.0; flake8>3.2.0; flake8-builtins!=1.4.0; flake8-comprehensions; flake8-copyright; flake8-docstrings; flake8-import-order; flake8-mutable; flake8-pep3101; flake8-print; flake8-quotes; flake8-rst-docstrings; pep8-naming; netCDF4; pyproj>=1.9.4,!=2.0.0

59 changes: 40 additions & 19 deletions src/metpy/io/nexrad.py
@@ -219,7 +219,7 @@ def _read_data(self):

# Read the message header
msg_hdr = self._buffer.read_struct(self.msg_hdr_fmt)
log.debug('Got message: %s', str(msg_hdr))
log.debug('Got message: %s (at offset %d)', str(msg_hdr), self._buffer._offset)

# The AR2_BLOCKSIZE accounts for the CTM header before the
# data, as well as the Frame Check Sequence (4 bytes) after
@@ -340,7 +340,7 @@ def _decode_msg1(self, msg_hdr):
'Generator On',
'Transfer Switch Manual',
'Commanded Switchover')),
('avg_tx_pwr', 'H'), ('ref_calib_cor', 'h'),
('avg_tx_pwr', 'H'), ('ref_calib_cor', 'h', scaler(0.01)),
('data_transmission_enabled', 'H', BitField('None', 'None',
'Reflectivity', 'Velocity', 'Width')),
('vcp_num', 'h'), ('rda_control_auth', 'H', BitField('No Action',
@@ -362,19 +362,31 @@
('spot_blanking', 'H', BitField('Enabled', 'Disabled')),
('bypass_map_gen_date', 'H'), ('bypass_map_gen_time', 'H'),
('clutter_filter_map_gen_date', 'H'), ('clutter_filter_map_gen_time', 'H'),
(None, '2x'),
('refv_calib_cor', 'h', scaler(0.01)),
('transition_pwr_src_state', 'H', BitField('Off', 'OK')),
('RMS_control_status', 'H', BitField('RMS in control', 'RDA in control')),
# See Table IV-A for definition of alarms
(None, '2x'), ('alarms', '28s', Array('>14H'))], '>', 'Msg2Fmt')

msg2_additional_fmt = NamedStruct([
('sig_proc_options', 'H', BitField('CMD RhoHV Test')),
(None, '36x'), ('status_version', 'H')], '>', 'Msg2AdditionalFmt')

def _decode_msg2(self, msg_hdr):
msg_start = self._buffer.set_mark()
self.rda_status.append(self._buffer.read_struct(self.msg2_fmt))

# RDA Build 18.0 expanded the size, but only with spares for now
extra_size = 40 if self.rda_status[-1].rda_build >= '18.0' else 0
remaining = (msg_hdr.size_hw * 2 - self.msg_hdr_fmt.size
- self._buffer.offset_from(msg_start))

# RDA Build 18.0 expanded the size
if remaining >= self.msg2_additional_fmt.size:
self.rda_status.append(self._buffer.read_struct(self.msg2_additional_fmt))
remaining -= self.msg2_additional_fmt.size

self._check_size(msg_hdr, self.msg2_fmt.size + extra_size)
if remaining:
log.info('Padding detected in message 2. Length encoded as %d but offset when '
'done is %d', 2 * msg_hdr.size_hw, self._buffer.offset_from(msg_start))

def _decode_msg3(self, msg_hdr):
from ._nexrad_msgs.msg3 import descriptions, fields
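The reworked _decode_msg2 above replaces a hard-coded 40-byte "Build 18.0" allowance with arithmetic on the encoded message size: the size in halfwords is doubled to bytes, the message header and the bytes already consumed are subtracted, and the additional status block is read only if enough bytes remain. A self-contained sketch of that logic using the standard struct module; the field layouts and the 16-byte header size are simplified stand-ins, not MetPy's real NamedStruct definitions:

```python
import io
import struct

# Toy stand-ins for Msg2Fmt and Msg2AdditionalFmt; only the sizes matter here.
BASE_FMT = struct.Struct('>HhH')      # e.g. avg_tx_pwr, ref_calib_cor, vcp_num
EXTRA_FMT = struct.Struct('>H36xH')   # sig_proc_options, 36 spare bytes, status_version
MSG_HDR_SIZE = 16                     # assumed size of the per-message header, in bytes


def decode_msg2(buf, size_hw):
    """Decode a toy message 2, reading the optional trailing block only if present."""
    start = buf.tell()
    base = BASE_FMT.unpack(buf.read(BASE_FMT.size))
    # ref_calib_cor is stored in hundredths, hence the scaler(0.01) in the real reader
    ref_calib_cor = base[1] * 0.01

    # size_hw counts 2-byte halfwords and includes the message header
    remaining = size_hw * 2 - MSG_HDR_SIZE - (buf.tell() - start)

    extra = None
    if remaining >= EXTRA_FMT.size:
        extra = EXTRA_FMT.unpack(buf.read(EXTRA_FMT.size))
        remaining -= EXTRA_FMT.size

    if remaining:
        print('Padding detected: {} bytes left over'.format(remaining))
    return ref_calib_cor, extra


payload = BASE_FMT.pack(500, -250, 212) + EXTRA_FMT.pack(1, 7)
print(decode_msg2(io.BytesIO(payload), (MSG_HDR_SIZE + len(payload)) // 2))
```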
@@ -439,8 +451,9 @@ def _decode_msg13(self, msg_hdr):
for e in range(num_el):
seg_num = data[offset]
offset += 1
assert seg_num == (e + 1), ('Message 13 segments out of sync --'
' read {} but on {}'.format(seg_num, e + 1))
if seg_num != (e + 1):
log.warning('Message 13 segments out of sync -- read {} but on {}'.format(
seg_num, e + 1))

az_data = []
for _ in range(360):
@@ -568,11 +581,13 @@ def _decode_msg31(self, msg_hdr):
msg_start = self._buffer.set_mark()
data_hdr = self._buffer.read_struct(self.msg31_data_hdr_fmt)

# Read all the data block pointers separately. This simplifies just
# iterating over them
ptrs = self._buffer.read_binary(6, '>L')
# Read all the data block pointers separately. This makes it easy to loop and to
# handle the arbitrary numbers. We subtract 3 for the VOL, ELV, and RAD blocks that
# are required to be present (and can't be read like the data)
ptrs = self._buffer.read_binary(data_hdr.num_data_blks - 3, '>L')

assert data_hdr.compression == 0, 'Compressed message 31 not supported!'
if data_hdr.compression:
log.warning('Compressed message 31 not supported!')

self._buffer.jump_to(msg_start, data_hdr.vol_const_ptr)
vol_consts = self._buffer.read_struct(self.msg31_vol_const_fmt)
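The pointer change above switches from always reading six data block pointers to reading exactly num_data_blks - 3, since the header's block count includes the VOL, ELV, and RAD constant blocks that are handled separately. A sketch of that arithmetic with the standard struct module; read_data_block_pointers is a hypothetical helper, not MetPy API:

```python
import io
import struct


def read_data_block_pointers(buf, num_data_blks):
    """Read the message 31 pointers that follow the three constant-block pointers.

    num_data_blks counts every block, including VOL, ELV, and RAD, so only
    num_data_blks - 3 generic data-moment pointers remain to be read here.
    """
    count = num_data_blks - 3
    return struct.unpack('>{:d}L'.format(count), buf.read(4 * count))


# A record advertising 7 blocks carries 4 generic pointers after VOL/ELV/RAD.
print(read_data_block_pointers(io.BytesIO(struct.pack('>4L', 10, 20, 30, 40)), 7))
```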
@@ -581,11 +596,15 @@
el_consts = self._buffer.read_struct(self.msg31_el_const_fmt)

self._buffer.jump_to(msg_start, data_hdr.rad_const_ptr)
# Major version jumped with Build 14.0
if vol_consts.major < 2:
rad_consts = self._buffer.read_struct(self.rad_const_fmt_v1)
else:

# Look ahead to figure out how big the block is
jmp = self._buffer.set_mark()
size = self._buffer.read_binary(3, '>H')[-1]
self._buffer.jump_to(jmp)
if size == self.rad_const_fmt_v2.size:
rad_consts = self._buffer.read_struct(self.rad_const_fmt_v2)
else:
rad_consts = self._buffer.read_struct(self.rad_const_fmt_v1)

data = {}
block_count = 3
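Rather than keying the radial constants layout off the volume header's major version, the new code above peeks at the block's encoded length and picks the format whose size matches. A sketch of that look-ahead using tell/seek on a file-like buffer; the two byte counts are placeholders standing in for the sizes of rad_const_fmt_v1 and rad_const_fmt_v2:

```python
import struct

RAD_CONST_V1_SIZE = 20   # placeholder for rad_const_fmt_v1.size
RAD_CONST_V2_SIZE = 28   # placeholder for rad_const_fmt_v2.size


def peek_block_size(buf):
    """Return a data block's encoded length without consuming any bytes.

    The first two halfwords hold the block type and name characters; the
    third halfword is the block length that distinguishes the two layouts.
    """
    here = buf.tell()
    _, _, size = struct.unpack('>3H', buf.read(6))
    buf.seek(here)   # rewind so the block can be read normally afterwards
    return size


def read_rad_consts(buf):
    size = peek_block_size(buf)
    if size == RAD_CONST_V2_SIZE:
        return 'v2', buf.read(RAD_CONST_V2_SIZE)
    return 'v1', buf.read(RAD_CONST_V1_SIZE)
```

The new test_conditional_radconst test further down exercises both paths.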
@@ -607,8 +626,11 @@

if data_hdr.num_data_blks != block_count:
log.warning('Incorrect number of blocks detected -- Got %d'
'instead of %d', block_count, data_hdr.num_data_blks)
assert data_hdr.rad_length == self._buffer.offset_from(msg_start)
' instead of %d', block_count, data_hdr.num_data_blks)

if data_hdr.rad_length != self._buffer.offset_from(msg_start):
log.info('Padding detected in message. Length encoded as %d but offset when '
'done is %d', data_hdr.rad_length, self._buffer.offset_from(msg_start))

def _buffer_segment(self, msg_hdr):
# Add to the buffer
@@ -1549,7 +1571,6 @@ def __init__(self, filename):
self._process_end_bytes()

# Set up places to store data and metadata
# self.data = []
self.metadata = {}

# Handle free text message products that are pure text
1 change: 1 addition & 0 deletions src/metpy/static-data-manifest.txt
@@ -6,6 +6,7 @@ KTLX19990503_235621.gz 7a097251bb7a15dbcdec75812812e41a86c5eb9850f55c3d91d120c2c
KTLX20130520_201643_V06.gz 772e01b154a5c966982a6d0aa2fc78bc64f08a9b77165b74dc02d7aa5aa69275
KTLX20150530_000802_V06.bz2 d78689afc525c853dec8ccab4a4eccc2daacef5c7df198a35a3a791016e993b0
Level2_KFTG_20150430_1419.ar2v 77c3355c8a503561eb3cddc3854337e640d983a4acdfc27bdfbab60c0b18cfc1
TDAL20191021021543V08.raw.gz db299c0f31f1396caddb92ed1517d30494a6f47ca994138f85c925787176a0ef
Level3_Composite_dhr_1km_20180309_2225.gini 19fcc0179c9d3e87c462262ea817e87f52f60db4830314b8f936baa3b9817a44
NAM_test.nc 12338ad06d5bd223e99e2872b20a9c80d58af0c546731e4b00a6619adc247cd0
NHEM-MULTICOMP_1km_IR_20151208_2100.gini c144b29284aa915e6fd1b8f01939c656f2c72c3d7a9e0af5397f93067fe0d952
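Each manifest line pairs a test-data file name with its SHA-256 digest. A guess at how an entry like the new TDAL one could be produced; the project may have its own tooling for maintaining this file:

```python
import hashlib
from pathlib import Path


def manifest_entry(path):
    """Return a '<name> <sha256>' line in the style of static-data-manifest.txt."""
    digest = hashlib.sha256(Path(path).read_bytes()).hexdigest()
    return '{} {}'.format(Path(path).name, digest)


# e.g. manifest_entry('staticdata/TDAL20191021021543V08.raw.gz')
```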
Binary file added staticdata/TDAL20191021021543V08.raw.gz
27 changes: 20 additions & 7 deletions tests/io/test_nexrad.py
@@ -23,21 +23,25 @@
# 1999 file tests old message 1
# KFTG tests bzip compression and newer format for a part of message 31
# KTLX 2015 has missing segments for message 18, which was causing exception
level2_files = [('KTLX20130520_201643_V06.gz', datetime(2013, 5, 20, 20, 16, 46), 17),
('KTLX19990503_235621.gz', datetime(1999, 5, 3, 23, 56, 21), 16),
('Level2_KFTG_20150430_1419.ar2v', datetime(2015, 4, 30, 14, 19, 11), 12),
('KTLX20150530_000802_V06.bz2', datetime(2015, 5, 30, 0, 8, 3), 14),
('KICX_20170712_1458', datetime(2017, 7, 12, 14, 58, 5), 14)]
level2_files = [('KTLX20130520_201643_V06.gz', datetime(2013, 5, 20, 20, 16, 46), 17, 4, 6),
('KTLX19990503_235621.gz', datetime(1999, 5, 3, 23, 56, 21), 16, 1, 3),
('Level2_KFTG_20150430_1419.ar2v', datetime(2015, 4, 30, 14, 19, 11),
12, 4, 6),
('KTLX20150530_000802_V06.bz2', datetime(2015, 5, 30, 0, 8, 3), 14, 4, 6),
('KICX_20170712_1458', datetime(2017, 7, 12, 14, 58, 5), 14, 4, 6),
('TDAL20191021021543V08.raw.gz', datetime(2019, 10, 21, 2, 15, 43), 10, 1, 3)]


# ids here fixes how things are presented in pycharm
@pytest.mark.parametrize('fname, voltime, num_sweeps', level2_files,
@pytest.mark.parametrize('fname, voltime, num_sweeps, mom_first, mom_last', level2_files,
ids=[i[0].replace('.', '_') for i in level2_files])
def test_level2(fname, voltime, num_sweeps):
def test_level2(fname, voltime, num_sweeps, mom_first, mom_last):
"""Test reading NEXRAD level 2 files from the filename."""
f = Level2File(get_test_data(fname, as_file_obj=False))
assert f.dt == voltime
assert len(f.sweeps) == num_sweeps
assert len(f.sweeps[0][0][-1]) == mom_first
assert len(f.sweeps[-1][0][-1]) == mom_last


def test_level2_fobj():
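The two new parameters, mom_first and mom_last, pin down how many moments are attached to the first ray of the first and last sweeps. A quick interactive equivalent of what the test asserts, using the new TDAL file added in this PR; the ray indexing mirrors the assertions above and is otherwise an assumption about the sweep structure:

```python
from metpy.cbook import get_test_data
from metpy.io import Level2File

f = Level2File(get_test_data('TDAL20191021021543V08.raw.gz', as_file_obj=False))
print(f.dt, len(f.sweeps))      # 2019-10-21 02:15:43, 10 sweeps per the parametrization
print(len(f.sweeps[0][0][-1]))  # moments on the first ray of the first sweep -> 1
print(len(f.sweeps[-1][0][-1])) # moments on the first ray of the last sweep  -> 3
```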
@@ -53,6 +57,15 @@ def test_doubled_file():
assert len(f.sweeps) == 12


@pytest.mark.parametrize('fname, has_v2', [('KTLX20130520_201643_V06.gz', False),
('Level2_KFTG_20150430_1419.ar2v', True),
('TDAL20191021021543V08.raw.gz', False)])
def test_conditional_radconst(fname, has_v2):
"""Test whether we're using the right volume constants."""
f = Level2File(get_test_data(fname, as_file_obj=False))
assert hasattr(f.sweeps[0][0][3], 'calib_dbz0_v') == has_v2


#
# NIDS/Level 3 Tests
#