This repository has been archived by the owner on Aug 17, 2023. It is now read-only.

Commit
Fix incompatible dependencies on fairing install
吴雨羲 committed Jan 24, 2022
1 parent 6aa5506 commit fce1c05
Showing 4 changed files with 42 additions and 57 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -1,7 +1,8 @@
.metaparticle
*.bak
.vscode

.idea
.env
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
2 changes: 1 addition & 1 deletion kubeflow/fairing/constants/constants.py
@@ -58,7 +58,7 @@
KFSERVING_GROUP = "serving.kubeflow.org"
KFSERVING_KIND = "InferenceService"
KFSERVING_PLURAL = "inferenceservices"
KFSERVING_VERSION = 'v1alpha2'
KFSERVING_VERSION = 'v1beta1'
KFSERVING_DEFAULT_NAME = 'fairing-kfserving-'
KFSERVING_DEPLOYER_TYPE = 'kfservice'
KFSERVING_CONTAINER_NAME = 'user-container'
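With KFSERVING_VERSION bumped to 'v1beta1', the deployer now targets the v1beta1 InferenceService API. A minimal sketch (not part of this commit) of how these constants typically combine into the apiVersion stamped on generated resources:

    # Illustration only: compose the apiVersion written on an InferenceService
    # from the constants above.
    from kubeflow.fairing.constants import constants

    api_version = "{}/{}".format(constants.KFSERVING_GROUP, constants.KFSERVING_VERSION)
    print(api_version)  # serving.kubeflow.org/v1beta1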
81 changes: 34 additions & 47 deletions kubeflow/fairing/deployers/kfserving/kfserving.py
@@ -12,27 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid
import logging
import uuid

from kubernetes import client as k8s_client

from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2ONNXSpec
from kfserving import V1alpha2PyTorchSpec
from kfserving import V1alpha2SKLearnSpec
from kfserving import V1alpha2TritonSpec
from kfserving import V1alpha2XGBoostSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService

from kfserving import V1beta1InferenceService
from kfserving import V1beta1InferenceServiceSpec
from kfserving import V1beta1ONNXRuntimeSpec
from kfserving import V1beta1PredictorSpec
from kfserving import V1beta1SKLearnSpec
from kfserving import V1beta1TFServingSpec
from kfserving import V1beta1TorchServeSpec
from kfserving import V1beta1TritonSpec
from kfserving import V1beta1XGBoostSpec
from kubeflow.fairing import utils
from kubeflow.fairing.constants import constants
from kubeflow.fairing.deployers.deployer import DeployerInterface
from kubeflow.fairing.kubernetes.manager import KubeManager
from kubeflow.fairing import utils
from kubernetes import client as k8s_client

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@@ -153,50 +149,41 @@ def generate_isvc(self):
canary_predictor = self.generate_predictor_spec(
self.framework, container=self.custom_canary_container)

if canary_predictor:
isvc_spec = V1alpha2InferenceServiceSpec(
default=V1alpha2EndpointSpec(predictor=default_predictor),
canary=V1alpha2EndpointSpec(predictor=canary_predictor),
canary_traffic_percent=self.canary_traffic_percent)
else:
isvc_spec = V1alpha2InferenceServiceSpec(
default=V1alpha2EndpointSpec(predictor=default_predictor),
canary_traffic_percent=self.canary_traffic_percent)

return V1alpha2InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=k8s_client.V1ObjectMeta(
name=self.isvc_name,
generate_name=constants.KFSERVING_DEFAULT_NAME,
namespace=self.namespace),
spec=isvc_spec)

return V1beta1InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=k8s_client.V1ObjectMeta(
name=self.isvc_name,
generate_name=constants.KFSERVING_DEFAULT_NAME,
namespace=self.namespace),
spec=V1beta1InferenceServiceSpec(
predictor=default_predictor
))

def generate_predictor_spec(self, framework, storage_uri=None, container=None):
'''Generate predictor spec according to framework and
default_storage_uri or custom container.
'''
if self.framework == 'tensorflow':
predictor = V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(storage_uri=storage_uri))
predictor = V1beta1PredictorSpec(
tensorflow=V1beta1TFServingSpec(storage_uri=storage_uri))
elif self.framework == 'onnx':
predictor = V1alpha2PredictorSpec(
predictor = V1beta1PredictorSpec(
onnx=V1beta1ONNXRuntimeSpec(storage_uri=storage_uri))  # v1beta1 counterpart of V1alpha2ONNXSpec
elif self.framework == 'pytorch':
predictor = V1alpha2PredictorSpec(
pytorch=V1alpha2PyTorchSpec(storage_uri=storage_uri))
predictor = V1beta1PredictorSpec(
pytorch=V1beta1TorchServeSpec(storage_uri=storage_uri))
elif self.framework == 'sklearn':
predictor = V1alpha2PredictorSpec(
sklearn=V1alpha2SKLearnSpec(storage_uri=storage_uri))
predictor = V1beta1PredictorSpec(
sklearn=V1beta1SKLearnSpec(storage_uri=storage_uri))
elif self.framework == 'triton':
predictor = V1alpha2PredictorSpec(
triton=V1alpha2TritonSpec(storage_uri=storage_uri))
predictor = V1beta1PredictorSpec(
triton=V1beta1TritonSpec(storage_uri=storage_uri))
elif self.framework == 'xgboost':
predictor = V1alpha2PredictorSpec(
xgboost=V1alpha2XGBoostSpec(storage_uri=storage_uri))
elif self.framework == 'custom':
predictor = V1alpha2PredictorSpec(
custom=V1alpha2CustomSpec(container=container))
predictor = V1beta1PredictorSpec(
xgboost=V1beta1XGBoostSpec(storage_uri=storage_uri))
# elif self.framework == 'custom':
# predictor = V1beta1PredictorSpec(
# custom=V1alpha2CustomSpec(container=container))
else:
raise RuntimeError("Unsupported framework {}".format(framework))
return predictor
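For reference, a minimal sketch (not part of the commit) of the object shape generate_isvc() now produces for the sklearn case, built directly with the kfserving 0.6.x SDK; the storage URI and namespace are placeholders:

    from kubernetes import client as k8s_client
    from kfserving import (V1beta1InferenceService, V1beta1InferenceServiceSpec,
                           V1beta1PredictorSpec, V1beta1SKLearnSpec)

    # v1beta1 carries a single predictor on the spec; there is no
    # default/canary endpoint pair as there was in v1alpha2.
    isvc = V1beta1InferenceService(
        api_version='serving.kubeflow.org/v1beta1',
        kind='InferenceService',
        metadata=k8s_client.V1ObjectMeta(generate_name='fairing-kfserving-',
                                         namespace='default'),
        spec=V1beta1InferenceServiceSpec(
            predictor=V1beta1PredictorSpec(
                sklearn=V1beta1SKLearnSpec(storage_uri='gs://your-bucket/model'))))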
13 changes: 5 additions & 8 deletions requirements.txt
@@ -1,28 +1,25 @@
python-dateutil>=2.1,<=2.8.0
numpy>=1.17.3
kfserving>=0.3.0.2
kfserving>=0.6.1
docker>=3.4.1
notebook>=5.6.0
kubernetes==10.0.1
kubernetes>=12.0.0
future>=0.17.1
six>=1.11.0
google-cloud-storage>=1.13.2
google-cloud-logging>=1.13.0
requests>=2.21.0,<2.23
requests>=2.21.0
setuptools>=34.0.0
google-auth>=1.6.2
httplib2>=0.12.0
oauth2client>=4.0.0
tornado>=6.0.1
google-api-python-client>=1.7.8
cloudpickle>=0.8,<=1.4.1
urllib3==1.24.2
boto3>=1.9.0
azure-storage-file>=2.1.0
azure-mgmt-storage>=9.0.0
retrying>=1.3.3
kubeflow-tfjob>=0.1.1
kubeflow-pytorchjob>=0.1.1
kubeflow-training>=1.3.0
ibm-cos-sdk>=2.6.0
grpcio>=1.27.2
nbconvert>=5.6.1
msrestazure>=0.6.4
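
A quick sanity check (not part of the commit) that an installed environment satisfies the updated pins; the requirement strings are copied from this file:

    import pkg_resources

    for req in ('kfserving>=0.6.1', 'kubernetes>=12.0.0', 'kubeflow-training>=1.3.0'):
        # Raises DistributionNotFound or VersionConflict if the installed
        # package does not satisfy the pin.
        pkg_resources.require(req)
    print('dependency pins satisfied')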
