io.py 12.7 KB
Newer Older
Qusai Al Shidi's avatar
Qusai Al Shidi committed
1
2
3
4
5
6
7
8
"""Input/Output tools

Input/Output
============

Here are tools to read and write files relating to SWMF.

TODO: Move pandas dependency elsewhere.
Qusai Al Shidi's avatar
Qusai Al Shidi committed
9
10
11
12
13
14
15
"""

import datetime as dt
import numpy as np


def read_wdc_ae(wdc_filename):
    """Read auroral electrojet (AE) indices from Kyoto's World Data Center
       text file into a dictionary of lists.

    Args:
        wdc_filename (str): Filename of wdc data from
                            http://wdc.kugi.kyoto-u.ac.jp/
    Returns:
        dict: {
               Auroral indices 'AL', 'AE', 'AO', 'AU' (int): {
                    'times' (datetime.datetime): List of datetime objects
                                                 corresponding to time in UT.
                    'values' (int): List of indices.
              }

    Raises:
        AssertionError: If the file does not start with the 'AEALAOAU'
                        WDC AE header.
    """

    # Initialize return data
    return_data = {'AL': {'times': [], 'values': []},
                   'AE': {'times': [], 'values': []},
                   'AO': {'times': [], 'values': []},
                   'AU': {'times': [], 'values': []}}

    # Open and make sure it is correct file
    with open(wdc_filename) as wdc_file:
        header = wdc_file.readline()
        assert header[:8] == 'AEALAOAU', \
            'Does not seem to be a WDC AE file. First 8 chars: ' + header[:8]

        # Parse
        for line in wdc_file:
            data = line.split()
            # Two-digit year: 00-49 -> 20xx, 50-99 -> 19xx.
            year_suffix = int(data[1][:2])
            if year_suffix < 50:
                year = 2000 + year_suffix
            else:
                # BUGFIX: was `1990 + year_suffix`, which decoded e.g.
                # '85' as 2075 instead of 1985.
                year = 1900 + year_suffix
            month = int(data[1][2:4])
            day = int(data[1][4:6])
            hour = int(data[1][7:9])
            # Last two characters name the index (AL/AE/AO/AU).
            index = data[1][-2:]
            # One value per minute of the hour.
            values_60 = [int(val) for val in data[3:60+3]]

            # Fill
            for minute, value in enumerate(values_60):
                return_data[index]['values'].append(value)
                return_data[index]['times'].append(
                    dt.datetime(year, month, day, hour, minute))

    return return_data
Qusai Al Shidi's avatar
Qusai Al Shidi committed
64
65


66
def read_wdc_asy_sym(wdc_filename):
    """Reads a WDC file for ASY/SYM data.

    Reads an ASY/SYM file downloaded from
    http://wdc.kugi.kyoto-u.ac.jp/aeasy/index.html
    and puts it into a dictionary.

    Args:
        wdc_filename (str): Relative filename (or file handle no.) to read.

    Returns:
        dict: of values.
        {'[ASY-SYM]-[D-H]': 'times': [], 'values': []}

    Examples:
        ```
        indeces = swmfpy.io.read_wdc_asy_sym('wdc.dat')
        # Plot data
        plt.plot(indeces['SYM-H']['times'],
                 indeces['SYM-H']['values'],
                 label='SYM-H [nT]'
                 )
        plt.xlabel('Time [UT]')
        ```

    Important to note if there is bad data it will be filled as None.
    """

    # One 'times'/'values' pair per index-component combination.
    return_data = {key: {'times': [], 'values': []}
                   for key in ('ASY-D', 'ASY-H', 'SYM-D', 'SYM-H')}

    with open(wdc_filename) as wdc_file:

        # Validate that this really is a WDC ASY/SYM file.
        header = wdc_file.readline()
        assert header[:12] == 'ASYSYM N6E01', ('File does not seem to be'
                                               + 'an ASY/SYM file from wdc.'
                                               + 'First 12 characters: '
                                               + header)
        return_data['edition'] = header[24:34]

        for line in wdc_file:
            # Fixed-column date fields; two-digit year pivots at 1970.
            two_digit_year = int(line[12:14])
            year = two_digit_year + (2000 if two_digit_year < 70 else 1900)
            month = int(line[14:16])
            day = int(line[16:18])
            hour = int(line[19:21])
            # Key like 'ASY-D': index name (cols 21-23) + component (col 18).
            key = line[21:24] + '-' + line[18]

            # The 60 per-minute samples follow the first two tokens.
            tokens = line.split()
            minute_values = [int(tok) for tok in tokens[2:62]]
            for minute, value in enumerate(minute_values):
                return_data[key]['times'].append(
                    dt.datetime(year, month, day, hour, minute))
                # 99999 is the WDC bad-data sentinel -> stored as None.
                return_data[key]['values'].append(
                    value if value != 99999 else None)

    return return_data
150
151


Qusai Al Shidi's avatar
Qusai Al Shidi committed
152
153
154
155
156
def read_omni_csv(filename, filtering=False, **kwargs):
    """Take an OMNI csv file from cdaweb.sci.gsfc.nasa.gov
    and turn it into a pandas.DataFrame.

    Args:
        filename (str): Relative path of the OMNI csv file downloaded
                        from cdaweb.
        filtering (bool): default=False Remove points where the value
                          is >sigma (default: sigma=3) from mean.
        **kwargs:
            coarseness (int): default=3, Number of standard deviations
                              above which to remove if filtering=True.
            clean (bool): default=True, Clean the omni data of bad data points

    Returns:
        pandas.DataFrame: object with solar wind data

    Make sure to download the csv files with cdaweb.sci.gsfc.nasa.gov
    the header seperated into a json file for safety.

    This only tested with OMNI data specifically.
    """
    # TODO: This needs a lot of work

    import pandas as pd

    # Read the csv file and index the frame by timestamp.
    column_names = ['Time', 'Bx [nT]', 'By [nT]', 'Bz [nT]',
                    'Vx [km/s]', 'Vy [km/s]', 'Vz [km/s]',
                    'Rho [n/cc]', 'T [K]']
    with open(filename, 'r') as datafile:
        data = pd.read_csv(datafile, names=column_names, skiprows=1)
    data.set_index(pd.to_datetime(data[data.columns[0]]), inplace=True)
    data.drop(columns=data.columns[0], inplace=True)
    data.index.name = "Time [UT]"

    # Mask physically implausible samples to NaN; they get interpolated
    # or filled on return below.
    if kwargs.get('clean', True):
        absolute_limits = {'Bx [nT]': 80., 'By [nT]': 80., 'Bz [nT]': 80.,
                           'Vx [km/s]': 2000., 'Vy [km/s]': 1000.,
                           'Vz [km/s]': 1000.}
        for column, limit in absolute_limits.items():
            data[column] = data[column][data[column].abs() < limit]
        # Density and temperature are one-sided limits.
        data["Rho [n/cc]"] = data["Rho [n/cc]"][data["Rho [n/cc]"] < 500.]
        data["T [K]"] = data["T [K]"][data["T [K]"] < 1.e7]

    if filtering:
        _coarse_filtering(data, kwargs.get('coarseness', 3))

    return data.interpolate().bfill().ffill()


205
def _coarse_filtering(data, coarseness=3):
    """Applies coarse filtering to a pandas.DataFrame"""
    # Column by column, mask (to NaN) any sample whose magnitude exceeds
    # the mean absolute value by `coarseness` standard deviations.
    for col in data.columns:
        threshold = data[col].abs().mean() + coarseness * data[col].std()
        data[col] = data[col][data[col].abs() < threshold]


213
def write_imf_input(data, outfilename="IMF.dat", enable_rb=True, **kwargs):
    """Writes the pandas.DataFrame into an input file
    that SWMF can read as input IMF (IMF.dat).

    Args:
        data: pandas.DataFrame object with solar wind data. Must contain
              the columns 'Bx [nT]', 'By [nT]', 'Bz [nT]', 'Vx [km/s]',
              'Vy [km/s]', 'Vz [km/s]', 'Rho [n/cc]', 'T [K]' and be
              indexed by datetime-like values.
        outfilename: The output file name for ballistic solar wind data.
                     (default: "IMF.dat")
        enable_rb: Enables solar wind input for the radiation belt model,
                   written to "RB.SWIMF" in the working directory.
                   (default: True)

    Other parameters:
        gse: (default=False)
            Use GSE coordinate system for the file instead of GSM default.
        swlag_time: If given, written into the RB.SWIMF header as the
            solar wind lag time in seconds for travel to subsolar point.
    """

    # Generate BATS-R-US solar wind input file
    with open(outfilename, 'w') as outfile:
        outfile.write("CSV files downloaded from ")
        outfile.write("https://cdaweb.gsfc.nasa.gov/\n")
        if kwargs.get('gse', False):
            outfile.write("#COOR\nGSE\n")
        outfile.write("yr mn dy hr min sec msec bx by bz vx vy vz dens temp\n")
        outfile.write("#START\n")
        for index, rows in data.iterrows():
            outfile.write(index.strftime("%Y %m %d %H %M %S") + ' ')
            # Milliseconds: first three digits of the microseconds field.
            outfile.write(index.strftime("%f")[:3] + ' ')
            # Values truncated to 7 characters to keep columns narrow.
            outfile.write(str(rows['Bx [nT]'])[:7] + ' ')
            outfile.write(str(rows['By [nT]'])[:7] + ' ')
            outfile.write(str(rows['Bz [nT]'])[:7] + ' ')
            outfile.write(str(rows['Vx [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vy [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Vz [km/s]'])[:7] + ' ')
            outfile.write(str(rows['Rho [n/cc]'])[:7] + ' ')
            outfile.write(str(rows['T [K]'])[:7] + ' ')
            outfile.write('\n')
    # Generate RBE model solar wind input file
    if enable_rb:
        with open("RB.SWIMF", 'w') as rbfile:
            # Choose first element as t=0 header (unsure if this is safe)
            rbfile.write(data.index[0].strftime("%Y, %j, %H ")
                         + "! iyear, iday, ihour corresponding to t=0\n")
            # BUGFIX: previously this was `swlag_time = None` followed by
            # `if swlag_time in kwargs:`, which tested whether None was a
            # key of kwargs, so the swlag header was never written even
            # when the caller passed swlag_time=...
            if 'swlag_time' in kwargs:
                rbfile.write(str(kwargs["swlag_time"]) + "  "
                             + "! swlag time in seconds "
                             + "for sw travel to subsolar\n")
            # Unsure what 11902 means but following example file
            rbfile.write("11902 data                   "
                         + "P+ NP NONLIN    P+ V (MOM)\n")
            # Quantity header
            rbfile.write("dd mm yyyy hh mm ss.ms           "
                         + "#/cc          km/s\n")
            for index, rows in data.iterrows():
                rbfile.write(index.strftime("%d %m %Y %H %M %S.%f")
                             + "     "
                             + str(rows['Rho [n/cc]'])[:8]
                             + "     "
                             # Speed magnitude
                             + str(np.sqrt(rows['Vx [km/s]']**2
                                           + rows['Vy [km/s]']**2
                                           + rows['Vz [km/s]']**2))[:8]
                             + '\n')


278
def read_gm_log(filename, colnames=None, dtypes=None, index_time=True):
    """Make a dictionary out of the indeces outputted
    from the GM model log.

    Args:
        filename (str): The relative filename as a string. (or file handle no.)
        colnames ([str]): (default: None) Supply the name of the columns.
                                          If None it will use second line
                                          of log file.
        dtypes ([types]): (default: None) Provide types for the columns, if
                                          None then all will be float.
        index_time (bool): (default: True) Make a column of dt.datetime objects
                                           in dictionary key 'times'.
                                           Assumes columns 1-7 of the log
                                           are year, month, day, hour,
                                           minute, second, millisecond.

    Returns:
        Dictionary of the log file. Key 'description' holds the first
        line of the file; each column name maps to a list of values.

    Examples:
        To plot AL and Dst get the log files
        ```
        geo = swmfpy.io.read_gm_log('run/GM/IO2/geoindex_e20140215-100500.log')
        dst = swmfpy.io.read_gm_log('run/GM/IO2/log_e20140215-100500.log')

        # Plot AL indeces
        plt.plot(geo['times'], geo['AL'])
        ```

    """

    # If column names were not specified
    return_data = {}
    with open(filename, 'r') as logfile:

        # Find column names and initialize
        description = logfile.readline()
        return_data['description'] = description
        # Usually the names are in the second line
        if not colnames:
            colnames = logfile.readline().split()
        for col in colnames:
            return_data[col] = []

        # Fill data dictionary
        for line_num, line in enumerate(logfile):
            # NOTE(review): this skips the first THREE lines remaining
            # after the readline() calls above, although the comment
            # claimed two -- confirm against the actual log layout.
            if line_num > 2:  # First two lines are usually metadata
                for col, data in enumerate(line.split()):
                    # Convert each token with the caller-given type,
                    # defaulting every column to float.
                    if dtypes:
                        data = dtypes[col](data)
                    else:
                        data = float(data)
                    return_data[colnames[col]].append(data)

        # datetime index
        if index_time:
            return_data['times'] = []
            for row, year in enumerate(return_data[colnames[1]]):
                return_data['times'].append(
                    dt.datetime(int(year),
                                int(return_data[colnames[2]][row]),  # month
                                int(return_data[colnames[3]][row]),  # day
                                int(return_data[colnames[4]][row]),  # hour
                                int(return_data[colnames[5]][row]),  # min
                                int(return_data[colnames[6]][row]),  # sec
                                # NOTE(review): millisecond column is passed
                                # as datetime's microsecond argument --
                                # likely should be msec*1000; confirm.
                                int(return_data[colnames[7]][row])))  # ms

    return return_data