
Commit 26a66ca6 authored by Qusai Al Shidi

Fixed WDC functions in swmfpy.io

parent cb2f765a
File mode changed from 100644 to 100755
...
@@ -20,49 +20,130 @@ def read_wdc_ae(wdc_filename):
         wdc_filename (str): Filename of wdc data from
                             http://wdc.kugi.kyoto-u.ac.jp/
     Returns:
-        dict: {"time" (datetime.datetime): list of datetime objects
-                                           corresponding to time in UT.
-               "AL", "AE", "AO", "AU" (int): Auroral indeces.
+        dict: {
+            Auroral indeces 'AL', 'AE', 'AO', 'AU' (int): {
+                'times' (datetime.datetime): List of datetime objects
+                                             corresponding to time in UT.
+                'values' (int): List of indeces.
+                }
+            }
     """
-    data = {'AL': {'Time': [], 'Index': []},
-            'AE': {'Time': [], 'Index': []},
-            'AO': {'Time': [], 'Index': []},
-            'AU': {'Time': [], 'Index': []}}
+    # Initialize return data
+    return_data = {'AL': {'times': [], 'values': []},
+                   'AE': {'times': [], 'values': []},
+                   'AO': {'times': [], 'values': []},
+                   'AU': {'times': [], 'values': []}}
+
+    # Open and make sure it is correct file
     with open(wdc_filename) as wdc_file:
+        header = wdc_file.readline()
+        assert header[:8] == 'AEALAOAU', ('Does not seem to be a WDC AE file.'
+                                          + 'First 8 characters: ' + header[:8]
+                                          )
+
+        # Parse
         for line in wdc_file:
-            ind_data = line.split()
-            for minute in range(60):
-                # TODO: Use .zfill()?
-                str_min = str(minute)
-                if minute < 10:
-                    str_min = '0' + str_min
-                time = dt.datetime.strptime(ind_data[1][:-5]
-                                            + ind_data[1][7:-2]
-                                            + str_min,
-                                            '%y%m%d%H%M')
-                data[ind_data[1][-2:]]['Time'] += [time]
-                data[ind_data[1][-2:]]['Index'] += [int(ind_data[3+minute])]
-    return data
+            data = line.split()
+            year_suffix = int(data[1][:2])
+            if year_suffix < 50:
+                year = 2000 + year_suffix
+            else:
+                year = 1990 + year_suffix
+            month = int(data[1][2:4])
+            day = int(data[1][4:6])
+            hour = int(data[1][7:9])
+            index = data[1][-2:]
+            values_60 = [int(val) for val in data[3:60+3]]
+
+            # Fill
+            for minute, value in enumerate(values_60):
+                return_data[index]['values'].append(value)
+                return_data[index]['times'].append(
+                    dt.datetime(year, month, day, hour, minute))
+
+    return return_data
 
 
 def read_wdc_asy_sym(wdc_filename):
-    """Docstring
+    """Reads a WDC file for ASY/SYM data.
+
+    Reads an ASY/SYM file downloaded from
+    http://wdc.kugi.kyoto-u.ac.jp/aeasy/index.html
+    and puts it into a dictionary.
+
+    Args:
+        wdc_filename (str): Relative filename to read.
+
+    Returns:
+        dict: of values.
+        {'[ASY-SYM]-[D-H]': 'times': [], 'values': []}
+
+    Examples:
+        ```
+        indeces = swmfpy.io.read_wdc_asy_sym('wdc.dat')
+
+        # Plot data
+        plt.plot(indeces['SYM-H']['times'],
+                 indeces['SYM-H']['values'],
+                 label='SYM-H [nT]'
+                 )
+        plt.xlabel('Time [UT]')
+        ```
+
+    Important to note if there is bad data it will be filled as None.
     """
-    return_data = {'ASY': {'Time': [], 'Index': []},
-                   'SYM': {'Time': [], 'Index': []}}
+
+    return_data = {
+        'ASY-D': {
+            'times': [],
+            'values': [],
+            },
+        'ASY-H': {
+            'times': [],
+            'values': [],
+            },
+        'SYM-D': {
+            'times': [],
+            'values': [],
+            },
+        'SYM-H': {
+            'times': [],
+            'values': []
+            }
+        }
+
     with open(wdc_filename) as wdc_file:
-        header = wdc_file.readline()[:6]
-        assert header == 'ASYSYM', ('File does not seem to be'
-                                    + 'an ASY/SYM file from wdc.'
-                                    + 'First six characters: '
-                                    + header)
+        # Check for correct file
+        header = wdc_file.readline()
+        assert header[:12] == 'ASYSYM N6E01', ('File does not seem to be'
+                                               + 'an ASY/SYM file from wdc.'
+                                               + 'First 12 characters: '
+                                               + header)
+        return_data['edition'] = header[24:34]
+
         for line in wdc_file:
+            # Parse
+            year = int(line[12:14])
+            month = int(line[14:16])
+            day = int(line[16:18])
+            hour = int(line[19:21])
+            comp = line[18]
+            index = line[21:24]
+
+            # Fill 60 min data
             data = line.split()
+            values_60 = [int(val) for val in data[2:62]]
+            for minute, value in enumerate(values_60):
+                return_data[index+'-'+comp]['times'].append(
+                    dt.datetime(year, month, day, hour, minute))
+                # Check if data is bad
+                if value != 99999:
+                    return_data[index+'-'+comp]['values'].append(
+                        value)
+                else:
+                    return_data[index+'-'+comp]['values'].append(
+                        None)
-    return_data
+    return return_data
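For orientation, here is a minimal usage sketch of the two readers as they stand after this commit. The filenames ('ae.dat', 'asy_sym.dat') and the matplotlib plotting are illustrative assumptions, not part of the commit; the dictionary keys, the 'times'/'values' layout, and the None fill for the 99999 bad-data flag come from the diff above.

```
import matplotlib.pyplot as plt
import swmfpy.io as io

# Hypothetical local copies of WDC downloads
ae = io.read_wdc_ae('ae.dat')                  # keys: 'AL', 'AE', 'AO', 'AU'
asy_sym = io.read_wdc_asy_sym('asy_sym.dat')   # keys: 'ASY-D', 'ASY-H', 'SYM-D', 'SYM-H'

# One datetime per minute of data
plt.plot(ae['AE']['times'], ae['AE']['values'], label='AE [nT]')

# Values flagged 99999 in the file come back as None; mask them before plotting
sym_h = [(t, v) for t, v in zip(asy_sym['SYM-H']['times'],
                                asy_sym['SYM-H']['values'])
         if v is not None]
plt.plot([t for t, _ in sym_h], [v for _, v in sym_h], label='SYM-H [nT]')

plt.xlabel('Time [UT]')
plt.legend()
plt.show()
```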
...
@@ -87,10 +168,11 @@ def read_omni_csv(filename, filtering=False, **kwargs):
     the header seperated into a json file for safety.
     This only tested with OMNI data specifically.
     """
+    # TODO: This needs a lot of work
     import pandas as pd
     # Read the csv files and set the index to dates
     colnames = ['Time', 'Bx [nT]', 'By [nT]', 'Bz [nT]',
                 'Vx [km/s]', 'Vy [km/s]', 'Vz [km/s]',
...
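The body of read_omni_csv is elided in this hunk, so as a rough sketch of the pattern the visible lines suggest (read a headerless OMNI CSV with pandas and index it by the 'Time' column): the filename, the truncated column list, and every read_csv option below are assumptions for illustration, not the actual swmfpy implementation.

```
import pandas as pd

# Column list is truncated to what the diff shows
colnames = ['Time', 'Bx [nT]', 'By [nT]', 'Bz [nT]',
            'Vx [km/s]', 'Vy [km/s]', 'Vz [km/s]']

# Hypothetical file and parse options; set the index to dates
data = pd.read_csv('omni.csv', names=colnames, skiprows=1,
                   parse_dates=['Time'], index_col='Time')
print(data.head())
```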