[BYOC][COREML] Handle one symbol for each runtime (apache#5989)
* [BYOC][COREML] Handle one symbol for each runtime

* LOG -> DLOG
kazum authored and trevor-m committed Jul 14, 2020
1 parent 038dfc8 commit 13996b5
Showing 5 changed files with 66 additions and 91 deletions.
10 changes: 6 additions & 4 deletions python/tvm/contrib/coreml_runtime.py
@@ -18,12 +18,14 @@
 import tvm._ffi
 from ..rpc import base as rpc_base
 
-def create(model_dir, ctx):
+def create(symbol, compiled_model_path, ctx):
     """Create a runtime executor module given a coreml model and context.
     Parameters
     ----------
-    model_dir : str
-        The directory where the compiled models are located.
+    symbol : str
+        The symbol that represents the Core ML model.
+    compiled_model_path : str
+        The path of the compiled model to be deployed.
     ctx : TVMContext
         The context to deploy the module. It can be local or remote when there
         is only one TVMContext.
@@ -40,7 +42,7 @@ def create(model_dir, ctx):
     else:
         fcreate = tvm._ffi.get_global_func(runtime_func)
 
-    return CoreMLModule(fcreate(model_dir))
+    return CoreMLModule(fcreate(symbol, compiled_model_path))
 
 
 class CoreMLModule(object):
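For context, a minimal sketch of the updated entry point, assuming a model already compiled to a `.mlmodelc` directory; the symbol `"main"` and the path are illustrative values, matching the pattern used in the test changes below:

```python
import tvm
from tvm.contrib import coreml_runtime

# Illustrative values: "main" is the symbol the Core ML model was compiled
# under, and the .mlmodelc path would normally come from xcode.compile_coreml.
ctx = tvm.cpu(0)
runtime = coreml_runtime.create("main", "compiled.mlmodelc", ctx)
```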
2 changes: 1 addition & 1 deletion python/tvm/contrib/target/coreml.py
@@ -245,4 +245,4 @@ def coreml_compiler(func):
     builder.compile(model_dir)
 
     ctx = tvm.cpu(0)
-    return coreml_runtime.create(model_dir, ctx).module
+    return coreml_runtime.create(name, mlmodelc_path, ctx).module
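Here `name` and `mlmodelc_path` are derived earlier in `coreml_compiler` from the partitioned function's global symbol. A rough sketch of how a Relay module reaches this codegen, assuming the usual BYOC annotate-and-partition flow and the `coremlcompiler` target name (the ops and passes shown are an assumption, not part of this diff):

```python
import tvm
from tvm import relay
from tvm.relay import transform

# A trivial two-input multiply; elementwise multiplication is the same
# operation the runtime test below builds with mode='MULTIPLY'.
x = relay.var("x", shape=(1, 3), dtype="float32")
y = relay.var("y", shape=(1, 3), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x, y], x * y))

# Offload supported operators to the Core ML compiler and partition the
# graph; each partitioned function becomes one Core ML model whose runtime
# module is created under that function's global symbol.
mod = transform.AnnotateTarget("coremlcompiler")(mod)
mod = transform.PartitionGraph()(mod)
```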
16 changes: 7 additions & 9 deletions src/runtime/contrib/coreml/coreml_runtime.h
@@ -119,18 +119,16 @@ class CoreMLRuntime : public ModuleNode {
 
   /*!
    * \brief Initialize the coreml runtime with coreml model and context.
-   * \param model_dir The directory where compiled models are located.
+   * \param symbol The symbol of this model.
+   * \param model_path The compiled model path.
    */
-  void Init(const std::string& model_dir);
+  void Init(const std::string& symbol, const std::string& model_path);
 
-  /*!
-   * \brief Get coreml model.
-   * \param model_name The name of the model.
-   */
-  CoreMLModel& GetModel(const std::string& model_name);
+  /*! \brief The symbol that represents the Core ML model. */
+  std::string symbol_;
 
-  // Map of the avaiable CoreML models
-  std::unordered_map<std::string, std::unique_ptr<CoreMLModel>> model_map_;
+  /*! \brief The Core ML model */
+  std::unique_ptr<CoreMLModel> model_;
 };
 
 }  // namespace runtime
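With `model_map_` gone, `GetFunction` (next file) dispatches on a fixed set of member-function names plus the single stored `symbol_`. From Python the surface looks roughly like this, reusing `tvm`, `ctx`, and `runtime` from the first sketch above; the input name and shape are made up for illustration:

```python
import numpy as np

# Member functions routed through CoreMLRuntime::GetFunction.
runtime.set_input("input1", tvm.nd.array(np.zeros((1, 3), "float32"), ctx))
runtime.invoke()
out = runtime.get_output(0)

# The whole subgraph is also exposed as one packed function registered
# under the module's symbol, "main" in the earlier create() call.
run_main = runtime.module["main"]
```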
118 changes: 46 additions & 72 deletions src/runtime/contrib/coreml/coreml_runtime.mm
@@ -113,55 +113,39 @@
   return [[model_desc outputDescriptionsByName] count];
 }
 
-void CoreMLRuntime::Init(const std::string& _model_dir) {
-  NSString* model_dir = [NSString stringWithUTF8String:(_model_dir).c_str()];
-  if (![model_dir hasPrefix:@"/"]) {
+void CoreMLRuntime::Init(const std::string& symbol, const std::string& _model_path) {
+  symbol_ = symbol;
+
+  NSString* model_path = [NSString stringWithUTF8String:(_model_path).c_str()];
+  if (![model_path hasPrefix:@"/"]) {
     // find models in the bundle's framework
     NSBundle* bundle = [NSBundle mainBundle];
-    NSString* base = [bundle privateFrameworksPath];
-    model_dir = [base stringByAppendingPathComponent:model_dir];
-  }
-  NSFileManager* fileMamager = [NSFileManager defaultManager];
-  NSArray<NSString*>* files = [fileMamager contentsOfDirectoryAtPath:model_dir error:nil];
-  for (NSString* file in files) {
-    if ([[file pathExtension] isEqualToString:@"mlmodelc"]) {
-      NSString* model_path = [model_dir stringByAppendingPathComponent:file];
-      NSURL* url = [NSURL fileURLWithPath:model_path];
-      const std::string& model_name = [[file stringByDeletingPathExtension] UTF8String];
-      model_map_[model_name] = std::unique_ptr<CoreMLModel>(new CoreMLModel(url));
-    }
+    NSString* base = [[bundle privateFrameworksPath] stringByAppendingPathComponent:@"tvm"];
+    model_path = [base stringByAppendingPathComponent:model_path];
   }
-}
-
-CoreMLModel& CoreMLRuntime::GetModel(const std::string& model_name) {
-  CHECK(model_map_.count(model_name) > 0) << "No such model in this module: " << model_name;
-  return *model_map_[model_name];
+  NSURL* url = [NSURL fileURLWithPath:model_path];
+  model_ = std::unique_ptr<CoreMLModel>(new CoreMLModel(url));
 }
 
 PackedFunc CoreMLRuntime::GetFunction(const std::string& name,
                                       const ObjectPtr<Object>& sptr_to_self) {
   // Return member functions during query.
   if (name == "invoke" || name == "run") {
-    return PackedFunc(
-        [sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { GetModel("main").Invoke(); });
+    return PackedFunc([this](TVMArgs args, TVMRetValue* rv) { model_->Invoke(); });
   } else if (name == "set_input") {
-    return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
+    return PackedFunc([this](TVMArgs args, TVMRetValue* rv) {
       const auto& input_name = args[0].operator std::string();
-      GetModel("main").SetInput(input_name, args[1]);
+      model_->SetInput(input_name, args[1]);
     });
   } else if (name == "get_output") {
-    return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-      *rv = GetModel("main").GetOutput(args[0]);
-    });
+    return PackedFunc([this](TVMArgs args, TVMRetValue* rv) { *rv = model_->GetOutput(args[0]); });
   } else if (name == "get_num_outputs") {
-    return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-      *rv = GetModel("main").GetNumOutputs();
-    });
-  } else if (model_map_.count(name) != 0) {
+    return PackedFunc([this](TVMArgs args, TVMRetValue* rv) { *rv = model_->GetNumOutputs(); });
+  } else if (name == symbol_) {
     // Return the packedfunc which executes the subgraph.
-    return PackedFunc([sptr_to_self, name, this](TVMArgs args, TVMRetValue* rv) {
-      CoreMLModel& model = GetModel(name);
-      MLModelDescription* model_desc = [model.model_ modelDescription];
+    return PackedFunc([this](TVMArgs args, TVMRetValue* rv) {
+      MLModelDescription* model_desc = [model_->model_ modelDescription];
       NSString* metadata = [model_desc metadata][MLModelDescriptionKey];
       NSData* data = [metadata dataUsingEncoding:NSUTF8StringEncoding];
       NSDictionary* json = [NSJSONSerialization JSONObjectWithData:data
@@ -174,17 +158,17 @@
         CHECK(args[i].type_code() == kTVMDLTensorHandle || args[i].type_code() == kTVMNDArrayHandle)
             << "Expect NDArray or DLTensor as inputs\n";
         if (args[i].type_code() == kTVMDLTensorHandle) {
-          model.SetInput([input_names[i] UTF8String], args[i]);
+          model_->SetInput([input_names[i] UTF8String], args[i]);
         } else {
           LOG(FATAL) << "Not implemented";
         }
       }
 
       // Execute the subgraph.
-      model.Invoke();
+      model_->Invoke();
 
       // TODO: Support multiple outputs.
-      NDArray out = model.GetOutput(0);
+      NDArray out = model_->GetOutput(0);
       if (args[args.size() - 1].type_code() == kTVMDLTensorHandle) {
         DLTensor* arg = args[args.size() - 1];
         out.CopyTo(arg);
@@ -199,29 +183,25 @@
   }
 }
 
-Module CoreMLRuntimeCreate(const std::string& model_dir) {
+Module CoreMLRuntimeCreate(const std::string& symbol, const std::string& model_path) {
   auto exec = make_object<CoreMLRuntime>();
-  exec->Init(model_dir);
+  exec->Init(symbol, model_path);
   return Module(exec);
 }
 
 TVM_REGISTER_GLOBAL("tvm.coreml_runtime.create").set_body([](TVMArgs args, TVMRetValue* rv) {
-  *rv = CoreMLRuntimeCreate(args[0]);
+  *rv = CoreMLRuntimeCreate(args[0], args[1]);
 });
 
 void CoreMLRuntime::SaveToBinary(dmlc::Stream* stream) {
-  stream->Write((uint32_t)model_map_.size());
-  for (const auto& kv : model_map_) {
-    const std::string& model_name = kv.first;
-    NSURL* url = kv.second->url_;
-    NSFileWrapper* dirWrapper = [[[NSFileWrapper alloc] initWithURL:url options:0
-                                                              error:nil] autorelease];
-    NSData* dirData = [dirWrapper serializedRepresentation];
-    stream->Write(model_name);
-    stream->Write((uint64_t)[dirData length]);
-    stream->Write([dirData bytes], [dirData length]);
-    LOG(INFO) << "Save " << model_name << " (" << [dirData length] << " bytes)";
-  }
+  NSURL* url = model_->url_;
+  NSFileWrapper* dirWrapper = [[[NSFileWrapper alloc] initWithURL:url options:0
+                                                            error:nil] autorelease];
+  NSData* dirData = [dirWrapper serializedRepresentation];
+  stream->Write(symbol_);
+  stream->Write((uint64_t)[dirData length]);
+  stream->Write([dirData bytes], [dirData length]);
+  DLOG(INFO) << "Save " << symbol_ << " (" << [dirData length] << " bytes)";
 }
 
 /*!
@@ -234,9 +214,6 @@ Module CoreMLRuntimeCreate(const std::string& model_dir) {
 Module CoreMLRuntimeLoadFromBinary(void* strm) {
   dmlc::Stream* stream = static_cast<dmlc::Stream*>(strm);
 
-  uint32_t nr_models;
-  stream->Read(&nr_models);
-
   NSString* tempBaseDir = NSTemporaryDirectory();
   if (tempBaseDir == nil) tempBaseDir = @"/tmp";
 
@@ -248,26 +225,23 @@ Module CoreMLRuntimeLoadFromBinary(void* strm) {
   char* result = mkdtemp(buffer);
   NSString* tempDir = [NSString stringWithUTF8String:result];
 
-  for (int i = 0; i < nr_models; i++) {
-    std::string model_name;
-    stream->Read(&model_name);
-    uint64_t length;
-    stream->Read(&length);
-    void* ptr = new char[length];
-    stream->Read(ptr, length);
-    NSData* data = [[NSData alloc] initWithBytesNoCopy:ptr length:length];
-    NSFileWrapper* dirWrapper =
-        [[[NSFileWrapper alloc] initWithSerializedRepresentation:data] autorelease];
-    NSString* model_dir = [tempDir
-        stringByAppendingPathComponent:[NSString stringWithUTF8String:(model_name + ".mlmodelc")
-                                                                          .c_str()]];
-    NSURL* url = [NSURL fileURLWithPath:model_dir];
-    BOOL res = [dirWrapper writeToURL:url options:0 originalContentsURL:nil error:nil];
-    CHECK(res) << "Failed to create model directory " << [model_dir UTF8String];
-  }
+  std::string symbol;
+  stream->Read(&symbol);
+  uint64_t length;
+  stream->Read(&length);
+  void* ptr = new char[length];
+  stream->Read(ptr, length);
+  NSData* data = [[NSData alloc] initWithBytesNoCopy:ptr length:length];
+  NSFileWrapper* dirWrapper =
+      [[[NSFileWrapper alloc] initWithSerializedRepresentation:data] autorelease];
+  NSString* dirname = [NSString stringWithUTF8String:(symbol + ".mlmodelc").c_str()];
+  NSString* model_path = [tempDir stringByAppendingPathComponent:dirname];
+  NSURL* url = [NSURL fileURLWithPath:model_path];
+  BOOL res = [dirWrapper writeToURL:url options:0 originalContentsURL:nil error:nil];
+  CHECK(res) << "Failed to create model directory " << [model_path UTF8String];
 
   auto exec = make_object<CoreMLRuntime>();
-  exec->Init([tempDir UTF8String]);
+  exec->Init(symbol, [model_path UTF8String]);
   return Module(exec);
 }
 
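A note on the new on-disk format: `SaveToBinary` now writes exactly one record instead of a counted list, and `LoadFromBinary` mirrors it. Informally (field encodings are whatever `dmlc::Stream` uses for these types; this block is descriptive, not normative):

```python
# Blob layout written by SaveToBinary / read back by LoadFromBinary:
#
#   symbol_            the model's symbol string, e.g. "main"
#   uint64_t length    byte size of the serialized .mlmodelc file wrapper
#   payload            NSFileWrapper serializedRepresentation bytes
#
# LoadFromBinary unpacks the payload into <tempdir>/<symbol>.mlmodelc and
# calls Init(symbol, model_path) on the extracted directory.
```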
11 changes: 6 additions & 5 deletions tests/python/contrib/test_coreml_runtime.py
@@ -58,7 +58,7 @@ def create_coreml_model():
                             mode='MULTIPLY')
     return coremltools.models.MLModel(builder.spec)
 
-def verify(coreml_model, model_dir, ctx):
+def verify(coreml_model, model_path, ctx):
     coreml_model = create_coreml_model()
 
     out_spec = coreml_model.output_description._fd_spec
@@ -74,7 +74,7 @@ def verify(coreml_model, model_dir, ctx):
     coreml_outputs = [coreml_model.predict(inputs)[name] for name in out_names]
 
     # inference via tvm coreml runtime
-    runtime = coreml_runtime.create(model_dir, ctx)
+    runtime = coreml_runtime.create('main', model_path, ctx)
     for name in inputs:
         runtime.set_input(name, tvm.nd.array(inputs[name], ctx))
     runtime.invoke()
@@ -88,15 +88,16 @@ def check_remote(coreml_model):
         compiled_model = xcode.compile_coreml(coreml_model, out_dir=temp.temp_dir)
         xcode.popen_test_rpc(proxy_host, proxy_port, key, destination=destination,
                              libs=[compiled_model])
+        compiled_model = os.path.basename(compiled_model)
         remote = rpc.connect(proxy_host, proxy_port, key=key)
         ctx = remote.cpu(0)
-        verify(coreml_model, "tvm", ctx)
+        verify(coreml_model, compiled_model, ctx)
 
     def check_local(coreml_model):
         temp = util.tempdir()
-        xcode.compile_coreml(coreml_model, out_dir=temp.temp_dir)
+        compiled_model = xcode.compile_coreml(coreml_model, out_dir=temp.temp_dir)
         ctx = tvm.cpu(0)
-        verify(coreml_model, temp.temp_dir, ctx)
+        verify(coreml_model, compiled_model, ctx)
 
     coreml_model = create_coreml_model()
     check_remote(coreml_model)
