diff --git a/tardis/atomic.py b/tardis/atomic.py
index faa3c4d5cb6..a06adf6f1b5 100644
--- a/tardis/atomic.py
+++ b/tardis/atomic.py
@@ -215,6 +215,17 @@ def read_collision_data(fname):
     return collision_data, collision_temperatures
 
 
+def read_ion_cx_data(fname):
+    try:
+        h5_file = h5py.File(fname)
+        ion_cx_th_data = h5_file['ionization_cx_threshold']
+        ion_cx_sp_data = h5_file['ionization_cx_support']
+        return ion_cx_th_data, ion_cx_sp_data
+    except IOError as err:
+        print(err.errno)
+        print(err)
+        logger.critical('Cannot import. Error opening the file to read ionization_cx')
+
 
 def read_macro_atom_data(fname):
     if fname is None:
@@ -318,9 +329,15 @@ def from_hdf5(cls, fname=None):
         else:
             synpp_refs = None
 
+
+        if 'ion_cx_data' in h5_datasets:
+            ion_cx_data = read_ion_cx_data(fname)
+        else:
+            ion_cx_data = None
+
         atom_data = cls(atom_data=atom_data, ionization_data=ionization_data, levels_data=levels_data,
                         lines_data=lines_data, macro_atom_data=macro_atom_data, zeta_data=zeta_data,
-                        collision_data=(collision_data, collision_data_temperatures), synpp_refs=synpp_refs)
+                        collision_data=(collision_data, collision_data_temperatures), synpp_refs=synpp_refs, ion_cx_data=ion_cx_data)
 
         with h5py.File(fname) as h5_file:
             atom_data.uuid1 = h5_file.attrs['uuid1']
@@ -329,7 +346,7 @@ def from_hdf5(cls, fname=None):
         return atom_data
 
     def __init__(self, atom_data, ionization_data, levels_data, lines_data, macro_atom_data=None, zeta_data=None,
-                 collision_data=None, synpp_refs=None):
+                 collision_data=None, synpp_refs=None, ion_cx_data=None):
 
         if macro_atom_data is not None:
@@ -340,6 +357,17 @@ def __init__(self, atom_data, ionization_data, levels_data, lines_data, macro_at
         else:
             self.has_macro_atom = False
 
+        if ion_cx_data is not None:
+            self.has_ion_cx_data = True
+            # TODO: build proper pandas DataFrames for the ion_cx data here
+            self.ion_cx_th_data = DataFrame(np.array(ion_cx_data[0]))
+            self.ion_cx_th_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)
+
+            self.ion_cx_sp_data = DataFrame(np.array(ion_cx_data[1]))
+            self.ion_cx_sp_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)
+        else:
+            self.has_ion_cx_data = False
+
         if zeta_data is not None:
             self.zeta_data = zeta_data
             self.has_zeta_data = True
@@ -352,6 +380,8 @@ def __init__(self, atom_data, ionization_data, levels_data, lines_data, macro_at
             self.collision_data.set_index(['atomic_number', 'ion_number', 'level_number_lower', 'level_number_upper'],
                                           inplace=True)
 
+
+            self.has_collision_data = True
         else:
             self.has_collision_data = False
 
diff --git a/tardis/config_reader.py b/tardis/config_reader.py
index d59b7bdc22b..e1ec882de98 100644
--- a/tardis/config_reader.py
+++ b/tardis/config_reader.py
@@ -39,6 +39,38 @@ def calculate_density_after_time(densities, time_0, time_explosion):
     return densities * (time_explosion / time_0) ** -3
 
 
+def calculate_exponential_densities(velocities, velocity_0, rho_0, exponent):
+    """
+
+    This function computes a discrete exponential density profile.
+    :math:`\\rho = \\rho_0 \\times \\left( \\frac{v_0}{v} \\right)^n`
+
+    Parameters
+    ----------
+
+    velocities : array-like
+        velocities in km/s
+
+    velocity_0 : ~float
+        velocity at the inner boundary
+
+    rho_0 : ~float
+        density at velocity_0
+
+    exponent : ~float
+        exponent used in the power law
+
+    Returns
+    -------
+
+    array-like density structure
+
+    """
+    densities = rho_0 * (velocity_0 / velocities) ** exponent
+    return densities[1:]
+
+
 def parse_density_file_section(density_file_dict, time_explosion):
     density_file_parser = {}
 
@@ -159,9 +191,34 @@ def parse_branch85(density_dict, no_of_shells, v_inner, v_outer, time_explosion)
         densities = calculate_density_after_time(densities, time_0, time_explosion)
 
+
+
         return densities
 
     density_parser['branch85_w7'] = parse_branch85
 
+    def parse_exponential(density_dict, no_of_shells, v_inner, v_outer, time_explosion):
+        time_0 = density_dict.pop('time_0', 19.9999584)
+        if isinstance(time_0, basestring):
+            time_0 = parse2quantity(time_0).to('s').value
+        else:
+            logger.debug('time_0 not supplied for density exponential - using sensible default %g', time_0)
+        try:
+            rho_0 = density_dict.pop('rho')
+        except KeyError:
+            rho_0 = 1e-2
+            logger.warning('rho_0 was not given in the config! Using %g', rho_0)
+        try:
+            exponent = density_dict.pop('exponent')
+        except KeyError:
+            exponent = 2
+            logger.warning('exponent was not given in the config file! Using %f', exponent)
+
+        velocities = 0.5 * (v_inner + v_outer)
+        densities = calculate_exponential_densities(velocities, v_inner[0], rho_0, exponent)
+
+        return densities
+
+    density_parser['exponential'] = parse_exponential
+
     try:
         parser = density_parser[density_dict['type']]
     except KeyError:
@@ -233,41 +290,9 @@ def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.999958
     densities = density_coefficient * (velocities * 1e-5) ** -7
     densities = calculate_density_after_time(densities, time_0, time_explosion)
 
-    #densities *= (time_explosion / time_0) ** -3
-
     return densities[1:]
 
 
-def calculate_exponential_densities(velocities, velocity_0, rho_0, exponent):
-    """
-
-    This function computes a descret exponential density profile.
-    :math:`\\rho = \\rho_0 \\times \\left( \\frac{v_0}{v} \\right)^n`
-
-    Parameters
-    ----------
-
-    velocities : Array like list
-                velocities in km/s
-
-    velocity_0 : ~float
-        Velocity at the inner boundary
-
-
-    rho_0 : ~float
-        density at velocity_0
-
-    exponent : ~float
-        exponent used in the powerlaw
-
-    Returns
-    -------
-
-    Array like density structure
-
-    """
-    densities = rho_0 * (velocity_0 / velocities) ** exponent
-    return densities[1:]
 
 
 def read_w7_densities(fname=None):
@@ -361,6 +386,9 @@ def from_yaml(cls, fname, args=None):
 
         config_dict['time_explosion'] = time_explosion
 
+        # reading time since explosion
+        time_explosion_value, time_explosion_unit = config_dict.pop('time_explosion').split()
+        self.time_explosion = units.Quantity(float(time_explosion_value), time_explosion_unit).to('s').value
 
         if 'log_lsun' in yaml_dict['luminosity']:
             luminosity_value, luminosity_unit = yaml_dict['luminosity'].split()
@@ -368,6 +396,8 @@ def from_yaml(cls, fname, args=None):
         else:
             config_dict['luminosity'] = parse2quantity(yaml_dict['luminosity'])
 
+        # reading number of shells
+        no_of_shells = int(config_dict.pop('zones'))
 
         #Trying to figure out the structure (number of shells)
         structure_dict = yaml_dict['model'].pop('structure')
@@ -381,7 +411,6 @@ def from_yaml(cls, fname, args=None):
             if structure_dict != {}:
                 logger.warn('Accepted file for structure (density/velocity) structure ignoring all other arguments: \n%s\n',
                             pprint.pformat(structure_dict, indent=4))
-
         else:
             #requiring all keys: no_of_shells, velocity, density
             if not all([item in structure_dict.keys() for item in ('no_of_shells', 'velocity', 'density')]):
@@ -434,6 +463,8 @@ def from_yaml(cls, fname, args=None):
                     raise ValueError("Element provided in NLTE species %s unknown" % species_element)
                 nlte_species.append((atom_data.symbol2atomic_number[species_element], int(species_ion) - 1))
 
+        # reading line interaction type
+        self.line_interaction_type = config_dict.pop('line_interaction_type')
 
         for element in abundances_dict:
             element_symbol = reformat_element_symbol(element)
@@ -444,15 +475,27 @@ def from_yaml(cls, fname, args=None):
 
             abundances[z] = float(abundances_dict[element])
 
+        logger.info("Loaded atom data with UUID=%s", self.atom_data.uuid1)
+        logger.info("Loaded atom data with MD5=%s", self.atom_data.md5)
 
         config_dict['abundances'] = abundances
         config_dict['nlte_species'] = nlte_species
 
+        last_no_of_packets = config_dict.pop('last_no_of_packets', None)
+        if last_no_of_packets is not None:
+            self.last_no_of_packets = int(float(last_no_of_packets))
+            logger.info('Last iteration will have %g packets', self.last_no_of_packets)
+        else:
+            self.last_no_of_packets = None
 
         ########### DOING PLASMA SECTION ###############
 
         plasma_section = yaml_dict.pop('plasma')
 
+        spectrum_start_value, spectrum_start_unit = config_dict.pop(
+            'spectrum_start').split()
+        spectrum_start = units.Quantity(float(spectrum_start_value), spectrum_start_unit).to('angstrom',
+                                                                                             units.spectral()).value
 
         config_dict['initial_t_rad'] = parse2quantity(plasma_section['initial_t_rad']).to('K').value
         config_dict['initial_t_inner'] = parse2quantity(plasma_section['initial_t_inner']).to('K').value
@@ -469,7 +512,10 @@ def from_yaml(cls, fname, args=None):
             raise TardisConfigError('radiative_rates_types must be either "scatter", "downbranch", or "macroatom"')
 
         config_dict['line_interaction_type'] = plasma_section['line_interaction_type']
 
+        logger.debug('Converted spectrum start/end to angstrom %.4g %.4g', spectrum_start, spectrum_end)
+        self.spectrum_start = spectrum_start
+        self.spectrum_end = spectrum_end
 
         config_dict.update(yaml_dict.pop('montecarlo', {}))
 
@@ -489,6 +535,8 @@ def from_yaml(cls, fname, args=None):
         spectrum_end = parse2quantity(spectrum_section['end']).to('angstrom', units.spectral())
         spectrum_bins = int(spectrum_section['bins'])
 
+        if config_dict != {}:
+            logger.warn('Not all config options parsed - ignored %s' % config_dict)
 
         if spectrum_end > spectrum_start:
 
@@ -526,14 +574,17 @@ def from_yaml(cls, fname, args=None):
 
         return cls(config_dict)
 
+    def set_abundances(self, abundances):
+        """
+        Set the abundances.
+
+        abundances: `dict` or `list`
+            if a dict is given, uniform abundances are assumed; if a list is given, it must contain only lists
-
-
-
-
-
+        """
+        if self.no_of_shells is None:
+            raise ValueError('Cannot set abundances before the number of shells has been set')
 
     def __init__(self, config_dict):
 
@@ -542,8 +593,14 @@ def __init__(self, config_dict):
 
         self.number_densities = self.calculate_number_densities()
 
+    def set_densities(self, densities):
+        """
+        Set the density structure directly.
+
+        :param densities: array-like densities, one per shell
+        :return:
+        """
+        self.densities = densities
 
     def calculate_number_densities(self):
         abundances = self.abundances
diff --git a/tardis/plasma.py b/tardis/plasma.py
index 0188b8608f2..baa608e1c2f 100644
--- a/tardis/plasma.py
+++ b/tardis/plasma.py
@@ -437,6 +437,29 @@ def calculate_tau_sobolev(self):
         n_lower = self.level_populations.values[self.atom_data.lines_lower2level_idx]
         self.tau_sobolevs = sobolev_coefficient * f_lu * wavelength * self.time_explosion * n_lower
 
+    def calculate_bound_free(self):
+        """
+        Calculate bound-free (ionization) contributions on a coarse frequency grid.
+        :return:
+        """
+        nu_bins = range(1000, 10000, 1000)  # TODO: get the binning from the input file.
+        try:
+            bf = np.zeros((len(self.atom_data.levels), len(self.atom_data.selected_atomic_numbers), len(nu_bins)))
+        except AttributeError:
+            logger.critical("Error creating the bf array.")
+
+        phis = self.calculate_saha()
+        nnlevel = self.level_populations
+        for nu in nu_bins:
+            for i, (level_id, level) in enumerate(self.atom_data.levels.iterrows()):
+                atomic_number = level.name[0]
+                ion_number = level.name[1]
+                level_number = level.name[2]
+                sigma_bf_th = self.atom_data.ion_cx_th_data.ix[atomic_number, ion_number, level_number]
+                phi = phis.ix[atomic_number, ion_number]
+
+
 
     def update_macro_atom(self):
         """
         Updating the Macro Atom computations
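
For reference, a minimal standalone sketch of the exponential density profile that the new calculate_exponential_densities / parse_exponential code evaluates, rho = rho_0 * (v_0 / v) ** exponent at the mid-shell velocities. The velocity grid and the rho_0/exponent values below are made-up illustrative numbers, not values from any TARDIS configuration; parse_exponential falls back to rho_0 = 1e-2 and exponent = 2 only when the config omits them.

    import numpy as np

    # hypothetical shell boundary velocities in km/s (illustrative values only)
    v_inner = np.array([10000., 11000., 12000., 13000.])
    v_outer = np.array([11000., 12000., 13000., 14000.])

    velocities = 0.5 * (v_inner + v_outer)  # mid-shell velocities, as in parse_exponential
    rho_0, exponent = 1e-2, 2               # the defaults used when the config omits them

    # rho = rho_0 * (v_0 / v) ** n, with v_0 taken at the inner boundary
    densities = rho_0 * (v_inner[0] / velocities) ** exponent
    # calculate_exponential_densities additionally drops the first entry (densities[1:])
    print(densities)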