Models#
- 
torch::jit::Module metatensor_torch::load_atomistic_model(std::string path, c10::optional<c10::Device> device = c10::nullopt)#
- Check and then load the metatensor atomistic model at the given path.
- 
void metatensor_torch::check_atomistic_model(std::string path)#
- Check the exported metatensor atomistic model at the given path, and warn/error as required.
- 
using metatensor_torch::ModelOutput = torch::intrusive_ptr<ModelOutputHolder>#
- TorchScript will always manipulate ModelOutputHolder through a torch::intrusive_ptr.
- 
class ModelOutputHolder : public CustomClassHolder#
- Description of one of the quantities a model can compute. - Public Functions - 
inline ModelOutputHolder(std::string quantity_, std::string unit_, bool per_atom_, std::vector<std::string> explicit_gradients_)#
- Initialize ModelOutput with the given data.
 - 
std::string to_json() const#
- Serialize a ModelOutput to a JSON string.
 - Public Members - 
std::string quantity#
- quantity of the output (e.g. energy, dipole, …). If this is an empty string, no unit conversion will be performed. 
 - 
std::string unit#
- unit of the output. If this is an empty string, no unit conversion will be performed. 
 - 
bool per_atom = false#
- is the output defined per-atom or for the overall structure 
 - 
std::vector<std::string> explicit_gradients#
- Which gradients should be computed eagerly and stored inside the output TensorMap.
 - Public Static Functions - 
static ModelOutput from_json(const std::string &json)#
- Load a serialized ModelOutput from a JSON string.
 
- 
inline ModelOutputHolder(std::string quantity_, std::string unit_, bool per_atom_, std::vector<std::string> explicit_gradients_)#
- 
using metatensor_torch::ModelCapabilities = torch::intrusive_ptr<ModelCapabilitiesHolder>#
- TorchScript will always manipulate ModelCapabilitiesHolder through a torch::intrusive_ptr.
- 
class ModelCapabilitiesHolder : public CustomClassHolder#
- Description of a model's capabilities, i.e. everything a model can do. - Public Functions - 
inline ModelCapabilitiesHolder(std::string length_unit_, std::vector<int64_t> species_, torch::Dict<std::string, ModelOutput> outputs_)#
- Initialize ModelCapabilities with the given data.
 - 
std::string to_json() const#
- Serialize a ModelCapabilities to a JSON string.
 - Public Members - 
std::string length_unit#
- unit of lengths the model expects as input 
 - 
std::vector<int64_t> species#
- which atomic species the model can handle 
 - 
torch::Dict<std::string, ModelOutput> outputs#
- all possible outputs from this model and corresponding settings 
 - Public Static Functions - 
static ModelCapabilities from_json(const std::string &json)#
- Load a serialized ModelCapabilities from a JSON string.
 
- 
inline ModelCapabilitiesHolder(std::string length_unit_, std::vector<int64_t> species_, torch::Dict<std::string, ModelOutput> outputs_)#
- 
using metatensor_torch::ModelEvaluationOptions = torch::intrusive_ptr<ModelEvaluationOptionsHolder>#
- TorchScript will always manipulate ModelEvaluationOptionsHolder through a torch::intrusive_ptr.
- 
class ModelEvaluationOptionsHolder : public CustomClassHolder#
- Options requested by the simulation engine when running with a model. - Public Functions - 
ModelEvaluationOptionsHolder(std::string length_unit, torch::Dict<std::string, ModelOutput> outputs, torch::optional<TorchLabels> selected_atoms)#
- Initialize ModelEvaluationOptions with the given data.
 - 
inline torch::optional<TorchLabels> get_selected_atoms() const#
- Only run the calculation for a selected subset of atoms. If this is set to None, run the calculation on all atoms. If this is a set of Labels, it will have two dimensions named "system" and "atom", containing the 0-based indices of all the atoms in the selected subset.
 - 
void set_selected_atoms(torch::optional<TorchLabels> selected_atoms)#
- Setter for selected_atoms.
 - 
std::string to_json() const#
- Serialize a ModelEvaluationOptions to a JSON string.
 - Public Members - 
std::string length_unit#
- unit of lengths the engine uses for the model input 
 - 
torch::Dict<std::string, ModelOutput> outputs#
- requested outputs for this run and corresponding settings 
 - Public Static Functions - 
static ModelEvaluationOptions from_json(const std::string &json)#
- Load a serialized ModelEvaluationOptions from a JSON string.
 
- 
ModelEvaluationOptionsHolder(std::string length_unit, torch::Dict<std::string, ModelOutput> outputs, torch::optional<TorchLabels> selected_atoms)#