update refined infer code
HydrogenSulfate committed Sep 26, 2024
1 parent bc854b2 commit 892fd80
Showing 4 changed files with 200 additions and 270 deletions.
14 changes: 5 additions & 9 deletions deepmd/pd/entrypoints/main.py
@@ -354,9 +354,6 @@ def freeze(FLAGS):
** atype [None, natoms] paddle.int64
** nlist [None, natoms, nnei] paddle.int32
"""
model.atomic_model.buffer_type_map.set_value(
paddle.to_tensor([ord(c) for c in model.atomic_model.type_map], dtype="int32")
)
model = paddle.jit.to_static(
model.forward_lower,
full_graph=True,
@@ -366,12 +363,10 @@ def freeze(FLAGS):
InputSpec([-1, -1, -1], dtype="int32", name="nlist"),
],
)
extra_files = {}
paddle.jit.save(
model,
path=FLAGS.output,
skip_prune_program=True,
# extra_files,
)
suffix = "json" if PIR_ENABLED.lower() in ["true", "1"] else "pdmodel"
log.info(
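For context, a minimal standalone sketch of the freeze flow above: paddle.jit.to_static traces a dynamic-shape function against InputSpec descriptions (where -1 marks a dimension left dynamic), and paddle.jit.save serializes the resulting static graph. The toy forward function and output path are illustrative, not part of this commit:

import paddle
from paddle.static import InputSpec

def forward(coord, atype):
    # toy stand-in for model.forward_lower
    return coord.sum(axis=-1) + atype.astype("float64")

static_forward = paddle.jit.to_static(
    forward,
    full_graph=True,
    input_spec=[
        InputSpec([-1, -1, 3], dtype="float64", name="coord"),  # [nframes, natoms, 3]
        InputSpec([-1, -1], dtype="int64", name="atype"),  # [nframes, natoms]
    ],
)
paddle.jit.save(static_forward, path="/tmp/toy_model")  # emits .json (PIR) or .pdmodel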
@@ -445,19 +440,20 @@ def show(FLAGS):


def change_bias(FLAGS):
if FLAGS.INPUT.endswith(".pdparams"):
if FLAGS.INPUT.endswith(".pd"):
old_state_dict = paddle.load(FLAGS.INPUT)
model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict))
model_params = model_state_dict["_extra_state"]["model_params"]
# elif FLAGS.INPUT.endswith(".pdmodel"):
# old_model = paddle.jit.load(FLAGS.INPUT[: -len(".pdmodel")])
# elif FLAGS.INPUT.endswith(".json"):
# old_model = paddle.jit.load(FLAGS.INPUT[: -len(".json")])
# model_params_string = old_model.get_model_def_script()
# model_params = json.loads(model_params_string)
# old_state_dict = old_model.state_dict()
# model_state_dict = old_state_dict

[Code scanning / CodeQL notice: Commented-out code — this comment appears to contain commented-out code.]
else:
raise RuntimeError(
"The model provided must be a checkpoint file with a .pd extension"
"Paddle now do not support change bias directly from a freezed model file"
"Please provided a checkpoint file with a .pd extension"
# "or a frozen model with a .pdparams extension"
)
multi_task = "model_dict" in model_params
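A side note on the RuntimeError message above: adjacent Python string literals concatenate with no separator, so each fragment must end with an explicit space or the message runs together. A quick illustration:

msg = (
    "first sentence. "
    "second sentence."
)
assert msg == "first sentence. second sentence."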
50 changes: 45 additions & 5 deletions deepmd/pd/model/atomic_model/dp_atomic_model.py
@@ -58,18 +58,58 @@ def __init__(
super().__init__(type_map, **kwargs)
ntypes = len(type_map)
self.type_map = type_map

[Code scanning / CodeQL warning: Overwriting attribute in super-class or sub-class — assignment overwrites attribute type_map, which was previously defined in superclass BaseAtomicModel.]
self.register_buffer(
"buffer_type_map",
paddle.to_tensor([ord(c) for c in self.type_map], dtype="int32"),
)
self.buffer_type_map.name = "type_map"

self.ntypes = ntypes
self.descriptor = descriptor
self.rcut = self.descriptor.get_rcut()
self.sel = self.descriptor.get_sel()
self.fitting_net = fitting
super().init_out_stat()

# The buffers below are registered manually so that C++ inference can
# look them up by name.

# register 'type_map' as buffer
def string_to_array(s: str) -> list[int]:
return [ord(c) for c in s]

self.register_buffer(
"buffer_type_map",
paddle.to_tensor(string_to_array(" ".join(self.type_map)), dtype="int32"),
)
self.buffer_type_map.name = "buffer_type_map"
# register 'has_message_passing' as buffer (cast to int32 to avoid the pitfalls of std::vector<bool> on the C++ side)
self.register_buffer(
"buffer_has_message_passing",
paddle.to_tensor([self.has_message_passing()], dtype="int32"),
)
self.buffer_has_message_passing.name = "buffer_has_message_passing"
# register 'ntypes' as buffer
self.register_buffer(
"buffer_ntypes", paddle.to_tensor([self.ntypes], dtype="int32")
)
self.buffer_ntypes.name = "buffer_ntypes"
# register 'rcut' as buffer
self.register_buffer(
"buffer_rcut", paddle.to_tensor([self.rcut], dtype="float64")
)
self.buffer_rcut.name = "buffer_rcut"
# register 'dfparam' as buffer
self.register_buffer(
"buffer_dfparam", paddle.to_tensor([self.get_dim_fparam()], dtype="int32")
)
self.buffer_dfparam.name = "buffer_dfparam"
# register 'daparam' as buffer
self.register_buffer(
"buffer_daparam", paddle.to_tensor([self.get_dim_aparam()], dtype="int32")
)
self.buffer_daparam.name = "buffer_daparam"
# register 'aparam_nall' as buffer
self.register_buffer(
"buffer_aparam_nall",
paddle.to_tensor([self.is_aparam_nall()], dtype="int32"),
)
self.buffer_aparam_nall.name = "buffer_aparam_nall"
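
Because the element names are stored as the character codes of a single space-joined string, a consumer can invert the encoding with chr and split. A minimal round-trip sketch (the two-element type_map is illustrative):

import paddle

type_map = ["O", "H"]
buf = paddle.to_tensor([ord(c) for c in " ".join(type_map)], dtype="int32")
decoded = "".join(chr(int(i)) for i in buf.numpy()).split(" ")
assert decoded == type_map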

# @paddle.jit.export
def fitting_output_def(self) -> FittingOutputDef:
"""Get the output def of the fitting net."""
75 changes: 19 additions & 56 deletions source/api_cc/include/DeepPotPD.h
@@ -1,14 +1,9 @@
// SPDX-License-Identifier: LGPL-3.0-or-later
#pragma once

// #include "paddle/include/paddle_inference_api.h"
// #include "paddle/extension.h"
// #include "paddle/phi/backends/all_context.h"
#include <paddle/include/paddle_inference_api.h>

#include "DeepPot.h"
#include "common.h"
#include "commonPD.h"
#include "neighbor_list.h"

namespace deepmd {
/**
@@ -239,6 +234,16 @@ class DeepPotPD : public DeepPotBase {
**/
void get_type_map(std::string& type_map);

/**
* @brief Get a named buffer (scalar or array) registered in this model.
* @param[in] buffer_name The name of the buffer.
* @param[out] buffer_arr The buffer contents.
**/
template<typename BUFFERTYPE>
void get_buffer(const std::string &buffer_name, std::vector<BUFFERTYPE> &buffer_arr);

template<typename BUFFERTYPE>
void get_buffer(const std::string &buffer_name, BUFFERTYPE &buffer_arr);

/**
* @brief Get whether the atom dimension of aparam is nall instead of fparam.
* @param[out] aparam_nall whether the atom dimension of aparam is nall
@@ -328,65 +333,23 @@ class DeepPotPD : public DeepPotBase {
private:
int num_intra_nthreads, num_inter_nthreads;
bool inited;

template <class VT>
VT get_scalar(const std::string& name) const;

int ntypes;
int ntypes_spin;
int dfparam;
int daparam;
bool aparam_nall;
int aparam_nall;
// copy neighbor list info from host
std::shared_ptr<paddle_infer::Predictor> predictor = nullptr;
std::shared_ptr<paddle_infer::Config> config = nullptr;
std::shared_ptr<paddle_infer::Config> config;
std::shared_ptr<paddle_infer::Predictor> predictor;
double rcut;
double cell_size;
NeighborListData nlist_data;
int max_num_neighbors;
InputNlist nlist;
AtomMap atommap;
int gpu_id = 0;
int do_message_passing = 0; // 1:dpa2 model 0:others
bool gpu_enabled = true;
int dtype = paddle_infer::DataType::FLOAT64;
// paddle::Tensor firstneigh_tensor;
int gpu_id;
// use int instead of bool to avoid the pitfalls of std::vector<bool>
int do_message_passing; // 1: dpa2 model, 0: others
bool gpu_enabled;
std::unique_ptr<paddle_infer::Tensor> firstneigh_tensor;
// std::unordered_map<std::string, paddle::Tensor> comm_dict;
/**
* @brief Translate Paddle exceptions to the DeePMD-kit exception.
* @param[in] f The function to run.
* @example translate_error([&](){...});
*/
// void translate_error(std::function<void()> f);
/**
* @brief Validate the size of frame and atomic parameters.
* @param[in] nframes The number of frames.
* @param[in] nloc The number of local atoms.
* @param[in] fparam The frame parameter.
* @param[in] aparam The atomic parameter.
* @tparam VALUETYPE The type of the parameters, double or float.
*/
template <typename VALUETYPE>
void validate_fparam_aparam(const int nframes,
const int& nloc,
const std::vector<VALUETYPE>& fparam,
const std::vector<VALUETYPE>& aparam) const;
/**
* @brief Tile the frame or atomic parameters if there is only
* a single frame of frame or atomic parameters.
* @param[out] out_param The tiled frame or atomic parameters.
* @param[in] nframes The number of frames.
* @param[in] dparam The dimension of the frame or atomic parameters in a
* frame.
* @param[in] param The frame or atomic parameters.
* @tparam VALUETYPE The type of the parameters, double or float.
*/
template <typename VALUETYPE>
void tile_fparam_aparam(std::vector<VALUETYPE>& out_param,
const int& nframes,
const int& dparam,
const std::vector<VALUETYPE>& param) const;

};

} // namespace deepmd
