"""Input/Output tools

Here are tools to read and write files relating to SWMF.

TODO: Move pandas dependency elsewhere.
"""

import datetime as dt
import numpy as np


def read_wdc_ae(wdc_filename):
    """Read an auroral electrojet (AE) indeces from Kyoto's World Data Center
       text file into a dictionary of lists.

    Args:
        wdc_filename (str): Filename of wdc data from
                            http://wdc.kugi.kyoto-u.ac.jp/
    Returns:
        dict: {
               Auroral indices 'AL', 'AE', 'AO', 'AU' (str): {
                    'times' (datetime.datetime): List of datetime objects
                                                 corresponding to time in UT.
                    'values' (int): List of index values.
                    }
              }
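
    Examples:
        A minimal usage sketch (the filename here is hypothetical):

        ```python
        import swmfpy

        ae_indices = swmfpy.io.read_wdc_ae('ae.dat')
        print(ae_indices['AE']['times'][0], ae_indices['AE']['values'][0])
        ```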
    """

    # Initialize return data
    return_data = {'AL': {'times': [], 'values': []},
                   'AE': {'times': [], 'values': []},
                   'AO': {'times': [], 'values': []},
                   'AU': {'times': [], 'values': []}}

    # Open and make sure it is the correct file type
    with open(wdc_filename, 'rt') as wdc_file:
        header = wdc_file.readline()
        assert header[:8] == 'AEALAOAU', \
            'Does not seem to be a WDC AE file. First 8 chars: ' + header[:8]

        # Parse
        for line in wdc_file:
            data = line.split()
            year_suffix = int(data[1][:2])
            # Two-digit year: 00-49 -> 2000s, 50-99 -> 1900s
            if year_suffix < 50:
                year = 2000 + year_suffix
            else:
                year = 1900 + year_suffix
            month = int(data[1][2:4])
            day = int(data[1][4:6])
            hour = int(data[1][7:9])
            index = data[1][-2:]
            values_60 = [int(val) for val in data[3:60+3]]

            # Fill
            for minute, value in enumerate(values_60):
                return_data[index]['values'].append(value)
                return_data[index]['times'].append(
                    dt.datetime(year, month, day, hour, minute))

    return return_data


def read_wdc_asy_sym(wdc_filename):
    """Reads a WDC file for ASY/SYM data.

    Reads an ASY/SYM file downloaded from
    http://wdc.kugi.kyoto-u.ac.jp/aeasy/index.html
    and puts it into a dictionary.

    Args:
        wdc_filename (str): Relative filename (or file handle no.) to read.

    Returns:
        dict: Dictionary of values keyed by index name
              ('ASY-D', 'ASY-H', 'SYM-D', 'SYM-H'), each holding
              {'times': [], 'values': []}.

    Examples:
        ```python

        import matplotlib.pyplot as plt
        import swmfpy

        indices = swmfpy.io.read_wdc_asy_sym('wdc.dat')
        # Plot data
        plt.plot(indices['SYM-H']['times'],
                 indices['SYM-H']['values'],
                 label='SYM-H [nT]'
                 )
        plt.xlabel('Time [UT]')
        ```

    Note that bad data points are filled as None.
    """

    return_data = {
        'ASY-D': {
            'times': [],
            'values': [],
            },
        'ASY-H': {
            'times': [],
            'values': [],
            },
        'SYM-D': {
            'times': [],
            'values': [],
            },
        'SYM-H': {
            'times': [],
            'values': []
            }
        }

    with open(wdc_filename) as wdc_file:
        # Check for correct file
        header = wdc_file.readline()
        assert header[:12] == 'ASYSYM N6E01', ('File does not seem to be '
                                               + 'an ASY/SYM file from wdc. '
                                               + 'First 12 characters: '
                                               + header)
        return_data['edition'] = header[24:34]

        for line in wdc_file:
            # Parse
            year = int(line[12:14])
            if year < 70:  # Starts from 1970 but only gives 2 digits
                year += 2000
            else:
                year += 1900
            month = int(line[14:16])
            day = int(line[16:18])
            hour = int(line[19:21])
            comp = line[18]
            index = line[21:24]

            # Fill 60 min data
            data = line.split()
            values_60 = [int(val) for val in data[2:62]]
            for minute, value in enumerate(values_60):
                return_data[index+'-'+comp]['times'].append(
                    dt.datetime(year, month, day, hour, minute))
                # Check if data is bad
                if value != 99999:
                    return_data[index+'-'+comp]['values'].append(
                        value)
                else:
                    return_data[index+'-'+comp]['values'].append(
                        None)

    return return_data


def read_omni_csv(filename, filtering=False, **kwargs):
    """Take an OMNI csv file from cdaweb.sci.gsfc.nasa.gov
    and turn it into a pandas.DataFrame.

    Args:
        filename (str): Filename of the OMNI csv file downloaded from
                        cdaweb.sci.gsfc.nasa.gov.
        filtering (bool): default=False. Remove points more than
                          sigma (default: sigma=3) standard deviations
                          from the mean.
        **kwargs:
            coarseness (int): default=3, Number of standard deviations
                              above which to remove if filtering=True.
            clean (bool): default=True, Clean the omni data of bad data points

    Returns:
        pandas.DataFrame: object with solar wind data

    Make sure to download the csv files from cdaweb.sci.gsfc.nasa.gov
    with the header separated into a json file for safety.

    This has only been tested with OMNI data specifically.
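
    Examples:
        A minimal usage sketch (the filename here is hypothetical):

        ```python
        import swmfpy

        data = swmfpy.io.read_omni_csv('omni_data.csv', filtering=True)
        print(data['Bz [nT]'].describe())
        ```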
    """
    # TODO: This needs a lot of work

    import pandas as pd

    # Read the csv files and set the index to dates
    colnames = ['Time', 'Bx [nT]', 'By [nT]', 'Bz [nT]',
                'Vx [km/s]', 'Vy [km/s]', 'Vz [km/s]',
                'Rho [n/cc]', 'T [K]']
    with open(filename, 'r') as datafile:
        data = pd.read_csv(datafile, names=colnames, skiprows=1)
    data.set_index(pd.to_datetime(data[data.columns[0]]), inplace=True)
    data.drop(columns=data.columns[0], inplace=True)
    data.index.name = "Time [UT]"

    # clean bad data
    if kwargs.get('clean', True):
        data["By [nT]"] = data["By [nT]"][data["By [nT]"].abs() < 80.]
        data["Bx [nT]"] = data["Bx [nT]"][data["Bx [nT]"].abs() < 80.]
        data["Bz [nT]"] = data["Bz [nT]"][data["Bz [nT]"].abs() < 80.]
        data["Rho [n/cc]"] = data["Rho [n/cc]"][data["Rho [n/cc]"] < 500.]
        data["Vx [km/s]"] = data["Vx [km/s]"][data["Vx [km/s]"].abs() < 2000.]
        data["Vz [km/s]"] = data["Vz [km/s]"][data["Vz [km/s]"].abs() < 1000.]
        data["Vy [km/s]"] = data["Vy [km/s]"][data["Vy [km/s]"].abs() < 1000.]
        data["T [K]"] = data["T [K]"][data["T [K]"] < 1.e7]

    if filtering:
        _coarse_filtering(data, kwargs.get('coarseness', 3))
    return data.interpolate().bfill().ffill()


def _coarse_filtering(data, coarseness=3):
    """Applies coarse filtering to a pandas.DataFrame"""
    for column in data.columns:
        mean = data[column].abs().mean()
        sigma = data[column].std()
        data[column] = data[data[column].abs() < mean+coarseness*sigma][column]


def write_imf_input(data, outfilename="IMF.dat", enable_rb=True, **kwargs):
    """Writes the pandas.DataFrame into an input file
    that SWMF can read as input IMF (IMF.dat).

    Args:
        data: pandas.DataFrame object with solar wind data
        outfilename: The output file name for ballistic solar wind data.
                     (default: "IMF.dat")
        enable_rb: Enables solar wind input for the radiation belt model.
                   (default: True)

    Other parameters:
        gse: (default=False)
            Use GSE coordinate system for the file instead of GSM default.
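
    Examples:
        A minimal usage sketch, assuming the DataFrame comes from
        read_omni_csv (the filenames here are hypothetical):

        ```python
        import swmfpy

        data = swmfpy.io.read_omni_csv('omni_data.csv')
        swmfpy.io.write_imf_input(data, outfilename='IMF.dat',
                                  enable_rb=False)
        ```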
    """

    # Generate BATS-R-US solar wind input file
    with open(outfilename, 'w') as outfile:
        outfile.write("CSV files downloaded from ")
        outfile.write("https://cdaweb.gsfc.nasa.gov/\n")
        if kwargs.get('gse', False):
            outfile.write("#COOR\nGSE\n")
        outfile.write("yr mn dy hr min sec msec bx by bz vx vy vz dens temp\n")
        outfile.write("#START\n")
        for index, rows in data.iterrows():
            outfile.write(index.strftime("%Y %m %d %H %M %S") + ' ')
            outfile.write(index.strftime("%f")[:3] + ' ')
            outfile.write(str(rows['Bx [nT]'])[:7] + ' ')
            outfile.write(str(rows['By [nT]'])[:7] + ' ')
            outfile.write(str(rows['Bz [nT]'])[:7] + ' ')
            outfile.write(str(rows['Vx [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vy [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vz [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Rho [n/cc]'])[:7] + ' ')
            outfile.write(str(rows['T [K]'])[:7] + ' ')
            outfile.write('\n')
    # Generate RBE model solar wind input file
    if enable_rb:
        with open("RB.SWIMF", 'w') as rbfile:
            # Choose first element as t=0 header (unsure if this is safe)
            rbfile.write(data.index[0].strftime("%Y, %j, %H ")
                         + "! iyear, iday, ihour corresponding to t=0\n")
            if 'swlag_time' in kwargs:
                rbfile.write(str(kwargs["swlag_time"]) + "  "
                             + "! swlag time in seconds "
                             + "for sw travel to subsolar\n")
            # Unsure what 11902 means but following example file
            rbfile.write("11902 data                   "
                         + "P+ NP NONLIN    P+ V (MOM)\n")
            # Quantity header
            rbfile.write("dd mm yyyy hh mm ss.ms           "
                         + "#/cc          km/s\n")
            for index, rows in data.iterrows():
                rbfile.write(index.strftime("%d %m %Y %H %M %S.%f")
                             + "     "
                             + str(rows['Rho [n/cc]'])[:8]
                             + "     "
                             # Speed magnitude
                             + str(np.sqrt(rows['Vx [km/s]']**2
                                           + rows['Vy [km/s]']**2
                                           + rows['Vz [km/s]']**2))[:8]
                             + '\n')


def read_gm_log(filename, colnames=None, dtypes=None, index_time=True):
    """Make a dictionary out of the indeces outputted
    from the GM model log.

    Args:
        filename (str): The relative filename as a string (or file handle no.).
        colnames ([str]): (default: None) Supply the name of the columns.
                                          If None it will use second line
                                          of log file.
        dtypes ([types]): (default: None) Provide types for the columns, if
                                          None then all will be float.
        index_time (bool): (default: True) Make a column of dt.datetime objects
                                           in dictionary key 'Time [UT]'.

    Returns:
        dict: Dictionary of the log file data.

    Examples:
        To plot AL and Dst, get the log files:
        ```python

        import matplotlib.pyplot as plt
        import swmfpy

        geo = swmfpy.io.read_gm_log('run/GM/IO2/geoindex_e20140215-100500.log')
        dst = swmfpy.io.read_gm_log('run/GM/IO2/log_e20140215-100500.log')

        # Plot AL indices
        plt.plot(geo['times'], geo['AL'])
        ```

    """

    # If column names were not specified
    return_data = {}
    with open(filename, 'r') as logfile:

        # Find column names and initialize
        description = logfile.readline()
        return_data['description'] = description
        # Usually the names are in the second line
        if not colnames:
            colnames = logfile.readline().split()
        colnames, renamed = _fix_str_duplicates(colnames)
        if renamed:
            print(f'{__name__}: Warning: '
                  + 'Found duplicates in column names. '
                  + 'Changes made to column names.')
        for col in colnames:
            return_data[col] = []

        # Fill data dictionary
        for line_num, line in enumerate(logfile):
            if line_num > 2:  # First two lines are usually metadata
                for col, data in enumerate(line.strip().split()):
                    if dtypes:
                        data = dtypes[col](data)
                    else:
                        data = float(data)
                    return_data[colnames[col]].append(data)

        # datetime index
        if index_time:
            return_data['times'] = []
            for row, year in enumerate(return_data[colnames[1]]):
                return_data['times'].append(
                    dt.datetime(int(year),
                                int(return_data[colnames[2]][row]),  # month
                                int(return_data[colnames[3]][row]),  # day
                                int(return_data[colnames[4]][row]),  # hour
                                int(return_data[colnames[5]][row]),  # min
                                int(return_data[colnames[6]][row]),  # sec
                                int(return_data[colnames[7]][row])))  # ms

    return return_data


def _fix_str_duplicates(str_list):
    """Returns a list and bool if a fix was made.
       The fix is adding an _[index] to avoid duplicates.
    """
    duplicate_found = False
    for index, string in enumerate(str_list):
        if str_list.count(string) > 1:
            duplicate_found = True
            str_list[index] = string + f'_{index}'
    if duplicate_found:
        raise RuntimeWarning
    return str_list