io.py 13.3 KB
Newer Older
Qusai Al Shidi's avatar
Qusai Al Shidi committed
1
2
3
4
5
"""Input/Output tools

Here are tools to read and write files relating to SWMF.

TODO: Move pandas dependency elsewhere.
Qusai Al Shidi's avatar
Qusai Al Shidi committed
6
7
8
9
10
11
12
"""

import datetime as dt
import numpy as np


def read_wdc_ae(wdc_filename):
    """Read auroral electrojet (AE) indeces from Kyoto's World Data Center
       text file into a dictionary of lists.

    Args:
        wdc_filename (str): Filename of wdc data from
                            http://wdc.kugi.kyoto-u.ac.jp/
    Returns:
        dict: {
               Auroral indeces 'AL', 'AE', 'AO', 'AU' (int): {
                    'times' (datetime.datetime): List of datetime objects
                                                 corresponding to time in UT.
                    'values' (int): List of indeces.
              }
    """

    # Initialize return data
    return_data = {'AL': {'times': [], 'values': []},
                   'AE': {'times': [], 'values': []},
                   'AO': {'times': [], 'values': []},
                   'AU': {'times': [], 'values': []}}

    # Open and make sure it is correct file
    with open(wdc_filename, 'rt') as wdc_file:
        header = wdc_file.readline()
        assert header[:8] == 'AEALAOAU', \
            'Does not seem to be a WDC AE file. First 8 chars: ' + header[:8]

        # Parse
        for line in wdc_file:
            data = line.split()
            year_suffix = int(data[1][:2])
            # Two-digit year: 00-49 -> 2000s, 50-99 -> 1900s.
            if year_suffix < 50:
                year = 2000 + year_suffix
            else:
                # BUG FIX: was `1990 + year_suffix`, which turned e.g.
                # suffix 97 into year 2087 instead of 1997.
                year = 1900 + year_suffix
            month = int(data[1][2:4])
            day = int(data[1][4:6])
            hour = int(data[1][7:9])
            index = data[1][-2:]  # which index this row is: AL/AE/AO/AU
            values_60 = [int(val) for val in data[3:60+3]]

            # Fill one value per minute of the hour
            for minute, value in enumerate(values_60):
                return_data[index]['values'].append(value)
                return_data[index]['times'].append(
                    dt.datetime(year, month, day, hour, minute))

    return return_data
Qusai Al Shidi's avatar
Qusai Al Shidi committed
61
62


63
def read_wdc_asy_sym(wdc_filename):
    """Reads a WDC file for ASY/SYM data.

    Reads an ASY/SYM file downloaded from
    http://wdc.kugi.kyoto-u.ac.jp/aeasy/index.html
    and puts it into a dictionary.

    Args:
        wdc_filename (str): Relative filename (or file handle no.) to read.

    Returns:
        dict: of values.
        {'[ASY-SYM]-[D-H]': 'times': [], 'values': []}

    Examples:
        ```python

        indeces = swmfpy.io.read_wdc_asy_sym('wdc.dat')
        # Plot data
        plt.plot(indeces['SYM-H']['times'],
                 indeces['SYM-H']['values'],
                 label='SYM-H [nT]'
                 )
        plt.xlabel('Time [UT]')

        ```

    Important to note if there is bad data it will be filled as None.
    """

    return_data = {
        'ASY-D': {
            'times': [],
            'values': [],
            },
        'ASY-H': {
            'times': [],
            'values': [],
            },
        'SYM-D': {
            'times': [],
            'values': [],
            },
        'SYM-H': {
            'times': [],
            'values': []
            }
        }

    with open(wdc_filename) as wdc_file:

        # Check for correct file
        header = wdc_file.readline()
        # BUG FIX: message fragments previously lacked separating spaces
        # ("to bean ASY/SYM file from wdc.First 12 ...").
        assert header[:12] == 'ASYSYM N6E01', ('File does not seem to be '
                                               + 'an ASY/SYM file from wdc. '
                                               + 'First 12 characters: '
                                               + header)
        return_data['edition'] = header[24:34]

        for line in wdc_file:

            # Parse the fixed-column date/time and index fields
            year = int(line[12:14])
            if year < 70:  # Starts from 1970 but only gives 2 digits
                year += 2000
            else:
                year += 1900
            month = int(line[14:16])
            day = int(line[16:18])
            hour = int(line[19:21])
            comp = line[18]      # component letter: 'D' or 'H'
            index = line[21:24]  # 'ASY' or 'SYM'

            # Fill 60 min data
            data = line.split()
            values_60 = [int(val) for val in data[2:62]]
            for minute, value in enumerate(values_60):
                return_data[index+'-'+comp]['times'].append(
                    dt.datetime(year, month, day, hour, minute))
                # 99999 is the WDC bad-data sentinel; store None instead
                if value != 99999:
                    return_data[index+'-'+comp]['values'].append(
                        value)
                else:
                    return_data[index+'-'+comp]['values'].append(
                        None)

    return return_data
149
150


Qusai Al Shidi's avatar
Qusai Al Shidi committed
151
152
153
154
155
def read_omni_csv(filename, filtering=False, **kwargs):
    """Take an OMNI csv file from cdaweb.sci.gsfc.nasa.gov
    and turn it into a pandas.DataFrame.

    Args:
        filename (str): Path of the csv file with OMNI solar wind data.
        filtering (bool): default=False Remove points where the value
                          is >sigma (default: sigma=3) from mean.
        **kwargs:
            coarseness (int): default=3, Number of standard deviations
                              above which to remove if filtering=True.
            clean (bool): default=True, Clean the omni data of bad data points

    Returns:
        pandas.DataFrame: object with solar wind data

    Make sure to download the csv files with cdaweb.sci.gsfc.nasa.gov
    the header seperated into a json file for safety.

    This only tested with OMNI data specifically.
    """
    # TODO: This needs a lot of work

    import pandas as pd

    # Read the csv file and index the frame by the parsed timestamps
    colnames = ['Time', 'Bx [nT]', 'By [nT]', 'Bz [nT]',
                'Vx [km/s]', 'Vy [km/s]', 'Vz [km/s]',
                'Rho [n/cc]', 'T [K]']
    with open(filename, 'r') as datafile:
        data = pd.read_csv(datafile, names=colnames, skiprows=1)
    data.set_index(pd.to_datetime(data[data.columns[0]]), inplace=True)
    data.drop(columns=data.columns[0], inplace=True)
    data.index.name = "Time [UT]"

    # Clean bad data: mask physically implausible points as NaN,
    # which the interpolate/fill step below patches over.
    # Each entry maps column -> (upper limit, compare absolute value?)
    if kwargs.get('clean', True):
        sanity_limits = {
            'Bx [nT]': (80., True),
            'By [nT]': (80., True),
            'Bz [nT]': (80., True),
            'Vx [km/s]': (2000., True),
            'Vy [km/s]': (1000., True),
            'Vz [km/s]': (1000., True),
            'Rho [n/cc]': (500., False),
            'T [K]': (1.e7, False),
            }
        for column, (limit, use_abs) in sanity_limits.items():
            series = data[column]
            magnitude = series.abs() if use_abs else series
            data[column] = series[magnitude < limit]

    if filtering:
        _coarse_filtering(data, kwargs.get('coarseness', 3))
    return data.interpolate().bfill().ffill()


204
def _coarse_filtering(data, coarseness=3):
    """Applies coarse filtering to a pandas.DataFrame.

    In each column, values whose magnitude is at or above
    mean(|column|) + coarseness * std(column) are dropped (become NaN),
    modifying the frame in place.
    """
    for name in data.columns:
        series = data[name]
        threshold = series.abs().mean() + coarseness * series.std()
        data[name] = series[series.abs() < threshold]


212
def write_imf_input(data, outfilename="IMF.dat", enable_rb=True, **kwargs):
    """Writes the pandas.DataFrame into an input file
    that SWMF can read as input IMF (IMF.dat).

    Args:
        data: pandas.DataFrame object with solar wind data
        outfilename: The output file name for ballistic solar wind data.
                     (default: "IMF.dat")
        enable_rb: Enables solar wind input for the radiation belt model.
                   (default: True)

    Other paramaters:
        gse: (default=False)
            Use GSE coordinate system for the file instead of GSM default.
        swlag_time: (default: not written)
            If given, written into RB.SWIMF as the solar wind lag time in
            seconds for travel to the subsolar point.
    """

    # Generate BATS-R-US solar wind input file
    with open(outfilename, 'w') as outfile:
        outfile.write("CSV files downloaded from ")
        outfile.write("https://cdaweb.gsfc.nasa.gov/\n")
        if kwargs.get('gse', False):
            outfile.write("#COOR\nGSE\n")
        outfile.write("yr mn dy hr min sec msec bx by bz vx vy vz dens temp\n")
        outfile.write("#START\n")
        for index, rows in data.iterrows():
            outfile.write(index.strftime("%Y %m %d %H %M %S") + ' ')
            # Milliseconds: first 3 digits of the microseconds field
            outfile.write(index.strftime("%f")[:3] + ' ')
            outfile.write(str(rows['Bx [nT]'])[:7] + ' ')
            outfile.write(str(rows['By [nT]'])[:7] + ' ')
            outfile.write(str(rows['Bz [nT]'])[:7] + ' ')
            outfile.write(str(rows['Vx [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vy [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vz [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Rho [n/cc]'])[:7] + ' ')
            outfile.write(str(rows['T [K]'])[:7] + ' ')
            outfile.write('\n')
    # Generate RBE model solar wind input file
    if enable_rb:
        with open("RB.SWIMF", 'w') as rbfile:
            # Choose first element as t=0 header (unsure if this is safe)
            rbfile.write(data.index[0].strftime("%Y, %j, %H ")
                         + "! iyear, iday, ihour corresponding to t=0\n")
            # BUG FIX: previously this tested `swlag_time in kwargs` with
            # swlag_time bound to None, which is never a kwargs key, so
            # the swlag line was silently never written.
            if 'swlag_time' in kwargs:
                rbfile.write(str(kwargs["swlag_time"]) + "  "
                             + "! swlag time in seconds "
                             + "for sw travel to subsolar\n")
            # Unsure what 11902 means but following example file
            rbfile.write("11902 data                   "
                         + "P+ NP NONLIN    P+ V (MOM)\n")
            # Quantity header
            rbfile.write("dd mm yyyy hh mm ss.ms           "
                         + "#/cc          km/s\n")
            for index, rows in data.iterrows():
                rbfile.write(index.strftime("%d %m %Y %H %M %S.%f")
                             + "     "
                             + str(rows['Rho [n/cc]'])[:8]
                             + "     "
                             # Speed magnitude
                             + str(np.sqrt(rows['Vx [km/s]']**2
                                           + rows['Vy [km/s]']**2
                                           + rows['Vz [km/s]']**2))[:8]
                             + '\n')


277
def read_gm_log(filename, colnames=None, dtypes=None, index_time=True):
    """Make a dictionary out of the indeces outputted
    from the GM model log.

    Args:
        filename (str): The relative filename as a string. (or file handle no.)
        colnames ([str]): (default: None) Supply the name of the columns.
                                          If None it will use second line
                                          of log file.
        dtypes ([types]): (default: None) Provide types for the columns, if
                                          None then all will be float.
        index_time (bool): (default: True) Make a column of dt.datetime objects
                                           in dictionary key 'times'.

    Returns:
        Dictionary of the log file

    Examples:
        To plot AL and Dst get the log files

        ```python

        geo = swmfpy.io.read_gm_log('run/GM/IO2/geoindex_e20140215-100500.log')
        dst = swmfpy.io.read_gm_log('run/GM/IO2/log_e20140215-100500.log')

        # Plot AL indeces
        plt.plot(geo['times'], geo['AL'])

        ```

    """

    # If column names were not specified
    return_data = {}
    with open(filename, 'r') as logfile:

        # Find column names and initialize
        description = logfile.readline()
        return_data['description'] = description
        # Usually the names are in the second line
        if not colnames:
            colnames = logfile.readline().split()
        # _fix_str_duplicates renames duplicate names in place and raises
        # RuntimeWarning when it changed anything, so colnames already holds
        # the fixed names even though the assignment is skipped on raise.
        try:
            colnames = _fix_str_duplicates(colnames)
        except RuntimeWarning:
            print(f'{__name__}: Warning: '
                  + 'Found duplicates in column names. '
                  + 'Changes made to column names.')
        for col in colnames:
            return_data[col] = []

        # Fill data dictionary
        for line_num, line in enumerate(logfile):
            # NOTE(review): line_num counts from the line AFTER the
            # readline() calls above, so this skips three further lines —
            # confirm GM logs really carry extra metadata here, otherwise
            # the first three data rows are dropped.
            if line_num > 2:  # First two lines are usually metadata
                for col, data in enumerate(line.strip().split()):
                    # Convert each field with the caller's dtype, or float
                    if dtypes:
                        data = dtypes[col](data)
                    else:
                        data = float(data)
                    return_data[colnames[col]].append(data)

        # datetime index
        # Assumes columns 1..7 are year, month, day, hour, minute,
        # second, millisecond — the standard SWMF log layout.
        if index_time:
            return_data['times'] = []
            for row, year in enumerate(return_data[colnames[1]]):
                return_data['times'].append(
                    dt.datetime(int(year),
                                int(return_data[colnames[2]][row]),  # month
                                int(return_data[colnames[3]][row]),  # day
                                int(return_data[colnames[4]][row]),  # hour
                                int(return_data[colnames[5]][row]),  # min
                                int(return_data[colnames[6]][row]),  # sec
                                int(return_data[colnames[7]][row])))  # ms

    return return_data
351
352
353
354
355
356
357
358
359
360
361
362
363
364


def _fix_str_duplicates(str_list):
    """Rename duplicated strings in str_list by appending _[index].

    Mutates str_list in place and also returns it. Raises RuntimeWarning
    when any rename was made so the caller can report it. Because counts
    are re-checked after each rename, the final duplicate occurrence
    keeps its original name once the earlier ones are unique.
    """
    renamed = False
    for position, name in enumerate(str_list):
        if str_list.count(name) > 1:
            renamed = True
            str_list[position] = f'{name}_{position}'
    if renamed:
        raise RuntimeWarning
    return str_list