Note: The default ITS GitLab runner is a shared resource and is subject to slowdowns during heavy usage.
You can run your own GitLab runner that is dedicated just to your group if you need to avoid processing delays.

io.py 13.3 KB
Newer Older
Qusai Al Shidi's avatar
Qusai Al Shidi committed
1
2
3
4
5
"""Input/Output tools

Here are tools to read and write files relating to SWMF.

TODO: Move pandas dependency elsewhere.
Qusai Al Shidi's avatar
Qusai Al Shidi committed
6
7
8
9
10
11
12
"""

import datetime as dt
import numpy as np


def read_wdc_ae(wdc_filename):
    """Read auroral electrojet (AE) indeces from Kyoto's World Data Center
       text file into a dictionary of lists.

    Args:
        wdc_filename (str): Filename of wdc data from
                            http://wdc.kugi.kyoto-u.ac.jp/
    Returns:
        dict: {
               Auroral indeces 'AL', 'AE', 'AO', 'AU' (int): {
               'times' (datetime.datetime)
               'values' (int): List of indeces.
              }
    """

    # Initialize return data
    return_data = {'AL': {'times': [], 'values': []},
                   'AE': {'times': [], 'values': []},
                   'AO': {'times': [], 'values': []},
                   'AU': {'times': [], 'values': []}}

    # Open and make sure it is correct file
    with open(wdc_filename, 'rt') as wdc_file:
        header = wdc_file.readline()
        assert header[:8] == 'AEALAOAU', \
            'Does not seem to be a WDC AE file. First 8 chars: ' + header[:8]

        # Parse
        for line in wdc_file:
            data = line.split()
            # Two-digit year: < 50 is 2000s, otherwise 1900s.
            # (Previously used 1900 + suffix written as 1990 + suffix,
            # which put e.g. '98' into the year 2088.)
            year_suffix = int(data[1][:2])
            if year_suffix < 50:
                year = 2000 + year_suffix
            else:
                year = 1900 + year_suffix
            month = int(data[1][2:4])
            day = int(data[1][4:6])
            hour = int(data[1][7:9])
            index = data[1][-2:]  # Index name, e.g. 'AE' or 'AL'
            # 60 one-minute values follow in whitespace columns 3..62
            values_60 = [int(val) for val in data[3:60+3]]

            # Fill one (time, value) pair per minute of the hour
            for minute, value in enumerate(values_60):
                return_data[index]['values'].append(value)
                return_data[index]['times'].append(
                    dt.datetime(year, month, day, hour, minute))

    return return_data
Qusai Al Shidi's avatar
Qusai Al Shidi committed
62
63


64
def read_wdc_asy_sym(wdc_filename):
    """Reads a WDC file for ASY/SYM data.

    Reads an ASY/SYM file downloaded from
    http://wdc.kugi.kyoto-u.ac.jp/aeasy/index.html
    and puts it into a dictionary.

    Args:
        wdc_filename (str): Relative filename (or file handle no.) to read.

    Returns:
        dict: of values.
        {'[ASY-SYM]-[D-H]': 'times': [], 'values': []}

    Examples:
        ```python
        indeces = swmfpy.io.read_wdc_asy_sym('wdc.dat')
        # Plot data
        plt.plot(indeces['SYM-H']['times'],
                 indeces['SYM-H']['values'],
                 label='SYM-H [nT]'
                 )
        plt.xlabel('Time [UT]')
        ```

    Important to note if there is bad data it will be filled as None.
    """

    return_data = {
        'ASY-D': {
            'times': [],
            'values': [],
            },
        'ASY-H': {
            'times': [],
            'values': [],
            },
        'SYM-D': {
            'times': [],
            'values': [],
            },
        'SYM-H': {
            'times': [],
            'values': []
            }
        }

    with open(wdc_filename) as wdc_file:

        # Check for correct file
        header = wdc_file.readline()
        # (Message previously concatenated without separating spaces.)
        assert header[:12] == 'ASYSYM N6E01', ('File does not seem to be '
                                               + 'an ASY/SYM file from wdc. '
                                               + 'First 12 characters: '
                                               + header)
        return_data['edition'] = header[24:34]

        for line in wdc_file:
            # Parse fixed-width metadata columns of each record
            year = int(line[12:14])
            if year < 70:  # Starts from 1970 but only gives 2 digits
                year += 2000
            else:
                year += 1900
            month = int(line[14:16])
            day = int(line[16:18])
            hour = int(line[19:21])
            comp = line[18]  # Component: 'D' or 'H'
            index = line[21:24]  # Index name: 'ASY' or 'SYM'

            # Fill 60 min data
            data = line.split()
            values_60 = [int(val) for val in data[2:62]]
            for minute, value in enumerate(values_60):
                return_data[index+'-'+comp]['times'].append(
                    dt.datetime(year, month, day, hour, minute))
                # Check if data is bad (99999 is the WDC fill value)
                if value != 99999:
                    return_data[index+'-'+comp]['values'].append(
                        value)
                else:
                    return_data[index+'-'+comp]['values'].append(
                        None)

    return return_data
148
149


Qusai Al Shidi's avatar
Qusai Al Shidi committed
150
151
152
153
154
def read_omni_csv(filename, filtering=False, **kwargs):
    """Take an OMNI csv file from cdaweb.sci.gsfc.nasa.gov
    and turn it into a pandas.DataFrame.

    Args:
        filename (str): Filename of the OMNI csv file downloaded from
                        cdaweb. Columns must be, in order: time,
                        Bx, By, Bz, Vx, Vy, Vz, density, temperature.
        filtering (bool): default=False Remove points where the value
                          is >sigma (default: sigma=3) from mean.

    **kwargs:
        coarseness (int): default=3, Number of standard deviations
                          above which to remove if filtering=True.
        clean (bool): default=True, Clean the omni data of bad data points

    Returns:
        pandas.DataFrame: object with solar wind data

    Make sure to download the csv files with cdaweb.sci.gsfc.nasa.gov
    the header separated into a json file for safety.

    This only tested with OMNI data specifically.
    """
    # TODO: This needs a lot of work

    import pandas as pd

    # Read the csv files and set the index to dates
    colnames = ['Time', 'Bx [nT]', 'By [nT]', 'Bz [nT]',
                'Vx [km/s]', 'Vy [km/s]', 'Vz [km/s]',
                'Rho [n/cc]', 'T [K]']
    with open(filename, 'r') as datafile:
        data = pd.read_csv(datafile, names=colnames, skiprows=1)
    data.set_index(pd.to_datetime(data[data.columns[0]]), inplace=True)
    data.drop(columns=data.columns[0], inplace=True)
    data.index.name = "Time [UT]"

    # Clean bad data: out-of-physical-range fill values become NaN
    # and are interpolated over below.
    if kwargs.get('clean', True):
        data["By [nT]"] = data["By [nT]"][data["By [nT]"].abs() < 80.]
        data["Bx [nT]"] = data["Bx [nT]"][data["Bx [nT]"].abs() < 80.]
        data["Bz [nT]"] = data["Bz [nT]"][data["Bz [nT]"].abs() < 80.]
        data["Rho [n/cc]"] = data["Rho [n/cc]"][data["Rho [n/cc]"] < 500.]
        data["Vx [km/s]"] = data["Vx [km/s]"][data["Vx [km/s]"].abs() < 2000.]
        data["Vz [km/s]"] = data["Vz [km/s]"][data["Vz [km/s]"].abs() < 1000.]
        data["Vy [km/s]"] = data["Vy [km/s]"][data["Vy [km/s]"].abs() < 1000.]
        data["T [K]"] = data["T [K]"][data["T [K]"] < 1.e7]

    if filtering:
        _coarse_filtering(data, kwargs.get('coarseness', 3))
    # Interpolate interior gaps, then back/forward fill the edges
    return data.interpolate().bfill().ffill()


204
def _coarse_filtering(data, coarseness=3):
Qusai Al Shidi's avatar
Qusai Al Shidi committed
205
206
207
208
209
210
211
    """Applies coarse filtering to a pandas.DataFrame"""
    for column in data.columns:
        mean = data[column].abs().mean()
        sigma = data[column].std()
        data[column] = data[data[column].abs() < mean+coarseness*sigma][column]


212
def write_imf_input(data, outfilename="IMF.dat", enable_rb=True, **kwargs):
    """Writes the pandas.DataFrame into an input file
    that SWMF can read as input IMF (IMF.dat).

    Args:
        data: pandas.DataFrame object with solar wind data. Must have a
              datetime index and the columns 'Bx [nT]', 'By [nT]',
              'Bz [nT]', 'Vx [km/s]', 'Vy [km/s]', 'Vz [km/s]',
              'Rho [n/cc]', 'T [K]'.
        outfilename: The output file name for ballistic solar wind data.
                     (default: "IMF.dat")
        enable_rb: Enables solar wind input for the radiation belt model
                   (writes "RB.SWIMF" in the working directory).
                   (default: True)

    **kwargs:
        gse (bool): (default: False) Use GSE coordinates instead of GSM.
        swlag_time (float): Solar wind lag time in seconds for travel to
                            the subsolar point, written into RB.SWIMF
                            when provided.
    """

    # Generate BATS-R-US solar wind input file
    with open(outfilename, 'w') as outfile:
        outfile.write("CSV files downloaded from ")
        outfile.write("https://cdaweb.gsfc.nasa.gov/\n")
        if kwargs.get('gse', False):
            outfile.write("#COOR\nGSE\n")
        outfile.write("yr mn dy hr min sec msec bx by bz vx vy vz dens temp\n")
        outfile.write("#START\n")
        for index, rows in data.iterrows():
            outfile.write(index.strftime("%Y %m %d %H %M %S") + ' ')
            outfile.write(index.strftime("%f")[:3] + ' ')  # milliseconds
            # Values truncated to 7 chars to keep columns compact
            outfile.write(str(rows['Bx [nT]'])[:7] + ' ')
            outfile.write(str(rows['By [nT]'])[:7] + ' ')
            outfile.write(str(rows['Bz [nT]'])[:7] + ' ')
            outfile.write(str(rows['Vx [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vy [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vz [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Rho [n/cc]'])[:7] + ' ')
            outfile.write(str(rows['T [K]'])[:7] + ' ')
            outfile.write('\n')
    # Generate RBE model solar wind input file
    if enable_rb:
        with open("RB.SWIMF", 'w') as rbfile:
            # Choose first element as t=0 header (unsure if this is safe)
            rbfile.write(data.index[0].strftime("%Y, %j, %H ")
                         + "! iyear, iday, ihour corresponding to t=0\n")
            # BUG FIX: previously tested `None in kwargs`, so this line
            # was never written even when swlag_time was supplied.
            if 'swlag_time' in kwargs:
                rbfile.write(str(kwargs["swlag_time"]) + "  "
                             + "! swlag time in seconds "
                             + "for sw travel to subsolar\n")
            # Unsure what 11902 means but following example file
            rbfile.write("11902 data                   "
                         + "P+ NP NONLIN    P+ V (MOM)\n")
            # Quantity header
            rbfile.write("dd mm yyyy hh mm ss.ms           "
                         + "#/cc          km/s\n")
            for index, rows in data.iterrows():
                rbfile.write(index.strftime("%d %m %Y %H %M %S.%f")
                             + "     "
                             + str(rows['Rho [n/cc]'])[:8]
                             + "     "
                             # Speed magnitude
                             + str(np.sqrt(rows['Vx [km/s]']**2
                                           + rows['Vy [km/s]']**2
                                           + rows['Vz [km/s]']**2))[:8]
                             + '\n')


280
def read_gm_log(filename, colnames=None, dtypes=None, index_time=True):
    """Make a dictionary out of the indeces outputted
    from the GM model log.

    Args:
        filename (str): The relative filename as a string. (or file handle no.)
        colnames ([str]): (default: None) Supply the name of the columns.
                                          If None it will use second line
                                          of log file.
        dtypes ([types]): (default: None) Provide types for the columns, if
                                          None then all will be float.
        index_time (bool): (default: True) Make a column of dt.datetime objects
                                           in dictionary key 'times'.

    Returns:
        Dictionary of the log file

    Examples:
        To plot AL and Dst get the log files
        ```python
        geo = swmfpy.io.read_gm_log('run/GM/IO2/geoindex_e20140215-100500.log')
        dst = swmfpy.io.read_gm_log('run/GM/IO2/log_e20140215-100500.log')

        # Plot AL indeces
        plt.plot(geo['times'], geo['AL'])
        ```

    """

    # If column names were not specified
    return_data = {}
    with open(filename, 'r') as logfile:

        # Find column names and initialize
        description = logfile.readline()
        return_data['description'] = description
        # Usually the names are in the second line
        if not colnames:
            colnames = logfile.readline().split()
        # _fix_str_duplicates renames duplicates *in place* and raises
        # RuntimeWarning as a signal, so by the time the except runs,
        # colnames has already been de-duplicated.
        try:
            colnames = _fix_str_duplicates(colnames)
        except RuntimeWarning:
            print(f'{__name__}: Warning: '
                  + 'Found duplicates in column names. '
                  + 'Changes made to column names.')
        for col in colnames:
            return_data[col] = []

        # Fill data dictionary
        # NOTE(review): the description (and, when colnames is None, the
        # column-name line) was already consumed by readline() above, yet
        # this guard still skips the first three *remaining* lines — this
        # looks like it drops leading data rows; confirm against a real
        # GM log file before relying on the first timestamps.
        for line_num, line in enumerate(logfile):
            if line_num > 2:  # First two lines are usually metadata
                for col, data in enumerate(line.strip().split()):
                    # Per-column conversion; all-float when dtypes is None
                    if dtypes:
                        data = dtypes[col](data)
                    else:
                        data = float(data)
                    return_data[colnames[col]].append(data)

        # datetime index
        # Assumes columns 1..7 are year, month, day, hour, minute,
        # second, millisecond (typical SWMF log layout) — TODO confirm.
        if index_time:
            return_data['times'] = []
            for row, year in enumerate(return_data[colnames[1]]):
                return_data['times'].append(
                    dt.datetime(int(year),
                                int(return_data[colnames[2]][row]),  # month
                                int(return_data[colnames[3]][row]),  # day
                                int(return_data[colnames[4]][row]),  # hour
                                int(return_data[colnames[5]][row]),  # min
                                int(return_data[colnames[6]][row]),  # sec
                                int(return_data[colnames[7]][row])))  # ms

    return return_data
352
353
354
355
356
357
358
359
360
361
362
363
364
365


def _fix_str_duplicates(str_list):
    """Returns a list and bool if a fix was made.
       The fix is adding an _[index] to avoid duplicates.
    """
    duplicate_found = False
    for index, string in enumerate(str_list):
        if str_list.count(string) > 1:
            duplicate_found = True
            str_list[index] = string + f'_{index}'
    if duplicate_found:
        raise RuntimeWarning
    return str_list