Resources and Error Resolvers #1

Open
siddhantprateek opened this issue Oct 12, 2023 · 5 comments
Labels
documentation Improvements or additions to documentation

Comments

@siddhantprateek
Owner

To resolve serverless-dynamodb-local dependency: 99x/serverless-dynamodb-local#294
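For reference, the plugin is typically wired into the Serverless config roughly like the sketch below. This is only an illustration of where the dependency sits, not code from this repo: the service name, stage, port, and runtime are placeholders, and the custom.dynamodb block follows the plugin's commonly documented shape.

// serverless.ts -- a minimal sketch, assuming a Serverless Framework project
// that loads serverless-dynamodb-local for a local DynamoDB instance.
// Service name, stage, port, and runtime are placeholders, not from this repo.
import type { AWS } from '@serverless/typescript';

const serverlessConfiguration: AWS = {
  service: 'resources-and-error-resolvers', // hypothetical service name
  frameworkVersion: '3',
  provider: {
    name: 'aws',
    runtime: 'nodejs18.x',
  },
  // serverless-dynamodb-local is typically listed before serverless-offline
  plugins: ['serverless-dynamodb-local', 'serverless-offline'],
  custom: {
    dynamodb: {
      stages: ['dev'], // only install/start local DynamoDB for the dev stage
      start: {
        port: 8000,
        inMemory: true,
        migrate: true, // create tables from the resources section on start
      },
    },
  },
};

module.exports = serverlessConfiguration;

The dependency problem tracked in 99x/serverless-dynamodb-local#294 sits behind that plugins entry, regardless of how the rest of the config looks.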

@siddhantprateek siddhantprateek added the documentation Improvements or additions to documentation label Oct 12, 2023
@Anindyadeep

Anindyadeep commented Oct 12, 2023

Can I build the nest for my bird like this ...

// Package cluster provides local access to cluster-level metadata
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
 */
package cluster

import (
	"context"
	"io"

	"github.com/NVIDIA/aistore/api/apc"
	"github.com/NVIDIA/aistore/cluster/meta"
	"github.com/NVIDIA/aistore/cmn"
	"github.com/NVIDIA/aistore/cmn/cos"
)

type (
	GetReaderResult struct {
		R        io.ReadCloser
		Err      error
		ExpCksum *cos.Cksum
		Size     int64
		ErrCode  int
	}

	BackendProvider interface {
		Provider() string
		MaxPageSize() uint
		CreateBucket(bck *meta.Bck) (errCode int, err error)
		ListObjects(bck *meta.Bck, msg *apc.LsoMsg, lst *cmn.LsoResult) (errCode int, err error)
		ListBuckets(qbck cmn.QueryBcks) (bcks cmn.Bcks, errCode int, err error)
		PutObj(r io.ReadCloser, lom *LOM) (errCode int, err error)
		DeleteObj(lom *LOM) (errCode int, err error)

		// with context
		HeadBucket(ctx context.Context, bck *meta.Bck) (bckProps cos.StrKVs, errCode int, err error)
		HeadObj(ctx context.Context, lom *LOM) (objAttrs *cmn.ObjAttrs, errCode int, err error)
		GetObj(ctx context.Context, lom *LOM, owt cmn.OWT) (errCode int, err error)
		GetObjReader(ctx context.Context, lom *LOM) GetReaderResult
	}
)

Or maybe something like this

#ifndef TENSORFLOW_COMPILER_MLIR_XLA_HLO_FUNCTION_IMPORTER_H_
#define TENSORFLOW_COMPILER_MLIR_XLA_HLO_FUNCTION_IMPORTER_H_

#include <unordered_map>

#include "absl/types/optional.h"
#include "mlir/IR/Attributes.h"  // TF:llvm-project
#include "mlir/IR/Builders.h"  // TF:llvm-project
#include "mlir/IR/Function.h"  // TF:llvm-project
#include "mlir/IR/MLIRContext.h"  // TF:llvm-project
#include "mlir/IR/Module.h"  // TF:llvm-project
#include "mlir/IR/StandardTypes.h"  // TF:llvm-project
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/xla/ir/hlo_ops.h"
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/types.h"

namespace xla {

class HloModule;
class HloComputation;
class HloInstruction;
class Shape;

// Helper class for importing HloComputations.
class HloFunctionImporter {
 public:
  static StatusOr<mlir::FuncOp> ImportFunction(
      mlir::ModuleOp module, mlir::Builder* builder,
      std::unordered_map<xla::HloComputation*, mlir::FuncOp>* function_map,
      xla::HloComputation* computation);

 private:
  HloFunctionImporter(
      mlir::ModuleOp module, mlir::Builder* builder,
      std::unordered_map<xla::HloComputation*, mlir::FuncOp>* function_map)
      : context_(module.getContext()),
        module_(module),
        builder_(builder),
        function_map_(function_map) {}

  StatusOr<mlir::FuncOp> ImportFunction(xla::HloComputation* computation);

  // Imports the given computation in the specified region.
  tensorflow::Status ImportComputation(HloComputation* computation,
                                       mlir::Region* region);

  // Imports instructions from the given computation in the specified block.
  // Assumes that the block already has correct arguments populated.
  tensorflow::Status ImportInstructions(HloComputation* computation,
                                        mlir::Block* block);

  // Imports an instruction.
  StatusOr<mlir::Operation*> ImportInstruction(xla::HloInstruction* instruction,
                                               mlir::OpBuilder* func_builder);

  // Gets the MLIR operand values from an HLO Instruction.
  StatusOr<llvm::SmallVector<mlir::Value, 4>> GetOperands(
      xla::HloInstruction* instruction);

  // Converts xla Tensor type to the corresponding MLIR type.
  StatusOr<mlir::RankedTensorType> ConvertTensorType(const xla::Shape& shape);

  // Returns the output type of an HloInstruction.
  StatusOr<mlir::Type> GetReturnType(xla::HloInstruction* instruction);

  // Takes a list of HloInstructions and generates the list of types used for
  // input, bypassing tuples to subsets.
  Status GetMlirTypes(const std::vector<xla::HloInstruction*>& instructions,
                      llvm::SmallVectorImpl<mlir::Type>* types);

  // Returns the Mlir Value for the corresponding HloInstruction.
  StatusOr<mlir::Value> GetMlirValue(xla::HloInstruction* instruction);

  // Converts an XLA PrecisionConfig to the corresponding MLIR attribute.
  mlir::NamedAttribute ConvertPrecisionConfig(xla::HloInstruction* instruction);

  // Converts an XLA ComparisonDirection to the corresponding MLIR attribute.
  mlir::NamedAttribute ConvertComparisonDirection(
      xla::HloInstruction* instruction);

  // Converts the dimensions of an HLO instruction into an MLIR attribute.
  mlir::DenseIntElementsAttr ConvertDimensions(
      llvm::ArrayRef<tensorflow::int64> op_dimensions);

  // Converts Array ref to an DenseIntElementsAttr.
  mlir::DenseIntElementsAttr Convert(llvm::ArrayRef<int64_t> op_dimensions);

  // Converts Array ref to padding attribute. Input is a flattened list of
  // padding low and padding high for each of the spatial dimensions.
  mlir::NamedAttribute ConvertPadding(llvm::ArrayRef<int64_t> padding);

  // Converts the dot dimensions to attribute.
  mlir::NamedAttribute ConvertDotDimensionNumbers(
      const DotDimensionNumbers& dnums);

  // Converts the conv dimensions to attributes.
  mlir::NamedAttribute ConvertConvDimensionNumbers(
      const xla::ConvolutionDimensionNumbers& dnums);

  // Converts the gather dimensions to attributes.
  mlir::NamedAttribute ConvertGatherDimensionNumbers(
      const xla::GatherDimensionNumbers& dnums);

  // Converts the scatter dimensions to attributes.
  mlir::NamedAttribute ConvertScatterDimensionNumbers(
      const xla::ScatterDimensionNumbers& dnums);

  // Converts replica groups to attribute
  mlir::NamedAttribute ConvertReplicaGroups(
      const std::vector<ReplicaGroup>& replica_groups);

  // Converts channel id to attribute
  mlir::NamedAttribute ConvertChannelHandle(
      absl::optional<tensorflow::int64> channel_id);

  // Converts channel handle to attribute
  mlir::NamedAttribute ConvertChannelHandle(const xla::ChannelHandle& channel);

  // Converts XLA instruction source target pairs to MLIR attribute.
  mlir::NamedAttribute ConvertSourceTargetPairs(
      const std::vector<std::pair<tensorflow::int64, tensorflow::int64>>&
          source_target_pairs);

  mlir::MLIRContext* context_;
  mlir::ModuleOp module_;
  mlir::Builder* builder_;

  // Mapping from HloComputation to the created MLIR function.
  std::unordered_map<xla::HloComputation*, mlir::FuncOp>* function_map_;

  // Mapping from HloInstructions to the associative MLIR values.
  std::unordered_map<xla::HloInstruction*, mlir::Value> instruction_value_map_;
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_MLIR_XLA_HLO_FUNCTION_IMPORTER_H_

lul

@siddhantprateek
Owner Author

@Anindyadeep Interesting feature request; we can have a separate issue and discussion for this.

@Anindyadeep

No, no, that does not make sense; I need it here right now.

@siddhantprateek
Owner Author

@Anindyadeep you can go ahead and open a new issue for this. Feel free to work through it yourself; I have added some resources below that may help.

Resources

How to create a GitHub issue

@Anindyadeep

Okay, I am making a PR.
