Skip to content

Commit

Permalink
Merge pull request #11 from zilliztech/nianliuu
Browse files Browse the repository at this point in the history
nianliuu
  • Loading branch information
nianliuu authored Sep 24, 2024
2 parents 1982080 + eaf799b commit ff89bf3
Show file tree
Hide file tree
Showing 35 changed files with 1,629 additions and 44 deletions.
6 changes: 4 additions & 2 deletions plugin-mapping.properties
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,6 @@ seatunnel.sink.Pulsar = connector-pulsar
seatunnel.source.ObsFile = connector-file-obs
seatunnel.sink.ObsFile = connector-file-obs
seatunnel.sink.ActiveMQ = connector-activemq
seatunnel.source.Qdrant = connector-qdrant
seatunnel.sink.Qdrant = connector-qdrant
seatunnel.source.Sls = connector-sls
seatunnel.source.Typesense = connector-typesense
seatunnel.sink.Typesense = connector-typesense
Expand All @@ -152,4 +150,8 @@ seatunnel.source.Elasticsearch = connector-elasticsearch
seatunnel.sink.Elasticsearch = connector-elasticsearch
seatunnel.source.Jdbc = connector-jdbc
seatunnel.sink.Jdbc = connector-jdbc
seatunnel.source.Pinecone = connector-pinecone
seatunnel.source.Qdrant = connector-qdrant
seatunnel.sink.Qdrant = connector-qdrant
seatunnel.source.Tencent = connector-tencent-vectordb

Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,11 @@ public void createTableInternal(TablePath tablePath, CatalogTable catalogTable)
TableSchema tableSchema = catalogTable.getTableSchema();
List<FieldType> fieldTypes = new ArrayList<>();
for (Column column : tableSchema.getColumns()) {
if(column.getOptions() != null && column.getOptions().containsKey(MilvusOptions.DYNAMIC_FIELD)
&& (Boolean) column.getOptions().get(MilvusOptions.DYNAMIC_FIELD)){
// skip dynamic field
continue;
}
fieldTypes.add(convertToFieldType(column, tableSchema.getPrimaryKey(), partitionKeyField));
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,4 +23,5 @@ public class MilvusOptions {
public static final String SHARDS_NUM = "shardsNum";
public static final String PARTITION_KEY_FIELD = "partitionKeyField";
public static final String PARTITION_NAMES = "partitionNames";
public static final String DYNAMIC_FIELD = "isDynamicField";
}
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,4 @@ public interface MilvusBatchWriter {
void flush() throws Exception;

void close() throws Exception;

long getRecordsWritten();
}
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,6 @@ public void addToBatch(SeaTunnelRow element) {
this.milvusDataCache.computeIfAbsent(partitionName, k -> new ArrayList<>());
milvusDataCache.get(partitionName).add(data);
writeCache.incrementAndGet();
writeCount.incrementAndGet();
}

@Override
Expand All @@ -165,15 +164,14 @@ public boolean needFlush() {

@Override
public void flush() throws Exception {
log.info("Starting to put {} records to Milvus.", this.batchSize);
log.info("Starting to put {} records to Milvus.", this.writeCache.get());
// Flush the batch writer
// Get the number of records completed
long recordsWritten = getRecordsWritten();
log.info("Successfully put {} records to Milvus. Total records written: {}", this.batchSize, recordsWritten);
if (this.milvusDataCache.isEmpty()) {
return;
}
writeData2Collection();
log.info("Successfully put {} records to Milvus. Total records written: {}", this.writeCache.get(), this.writeCount.get());
this.milvusDataCache = new HashMap<>();
this.writeCache.set(0L);
}
Expand All @@ -194,11 +192,6 @@ public void close() throws Exception {

}

@Override
public long getRecordsWritten() {
return this.writeCount.get();
}

private JsonObject buildMilvusData(SeaTunnelRow element) {
SeaTunnelRowType seaTunnelRowType = catalogTable.getSeaTunnelRowType();
PrimaryKey primaryKey = catalogTable.getTableSchema().getPrimaryKey();
Expand All @@ -219,7 +212,8 @@ private JsonObject buildMilvusData(SeaTunnelRow element) {
MilvusConnectionErrorCode.FIELD_IS_NULL, fieldName);
}
Gson gson = new Gson();
data.add(fieldName, gson.toJsonTree(MilvusConvertUtils.convertBySeaTunnelType(fieldType, value)));
Object object = MilvusConvertUtils.convertBySeaTunnelType(fieldType, value);
data.add(fieldName, gson.toJsonTree(object));
}
return data;
}
Expand All @@ -243,6 +237,7 @@ private void writeData2Collection() throws Exception {
log.error("error data: " + milvusDataCache);
throw new MilvusConnectorException(MilvusConnectionErrorCode.WRITE_DATA_FAIL);
}
writeCount.addAndGet(this.writeCache.get());
}

private void upsertWrite(String partitionName, List<JsonObject> data) throws InterruptedException {
Expand All @@ -259,21 +254,24 @@ private void upsertWrite(String partitionName, List<JsonObject> data) throws Int
} catch (Exception e) {
if (e.getMessage().contains("rate limit exceeded") || e.getMessage().contains("received message larger than max")) {
if (data.size() > 10) {
log.warn("upsert data failed, retry in smaller chunks");
log.warn("upsert data failed, retry in smaller chunks: {} ", data.size()/2);
this.batchSize = this.batchSize / 2;
log.info("sleep 1 minute to avoid rate limit");
//sleep 1 minute to avoid rate limit
Thread.sleep(60000);
log.info("sleep 1 minute success");
// Split the data and retry in smaller chunks
List<JsonObject> firstHalf = data.subList(0, data.size() / 2);
List<JsonObject> secondHalf = data.subList(data.size() / 2, data.size());
this.batchSize = this.batchSize / 2;
upsertWrite(partitionName, firstHalf);
upsertWrite(partitionName, secondHalf);
} else {
// If the data size is 10, throw the exception to avoid infinite recursion
throw e;
throw new MilvusConnectorException(MilvusConnectionErrorCode.WRITE_DATA_FAIL, e.getMessage(), e);
}
}
}
log.info("upsert data success");
}

private void insertWrite(String partitionName, List<JsonObject> data) {
Expand All @@ -290,10 +288,11 @@ private void insertWrite(String partitionName, List<JsonObject> data) {
} catch (Exception e) {
if (e.getMessage().contains("rate limit exceeded") || e.getMessage().contains("received message larger than max")) {
if (data.size() > 10) {
log.warn("insert data failed, retry in smaller chunks");
log.warn("insert data failed, retry in smaller chunks: {} ", data.size()/2);
// Split the data and retry in smaller chunks
List<JsonObject> firstHalf = data.subList(0, data.size() / 2);
List<JsonObject> secondHalf = data.subList(data.size() / 2, data.size());
this.batchSize = this.batchSize / 2;
insertWrite(partitionName, firstHalf);
insertWrite(partitionName, secondHalf);
} else {
Expand Down
46 changes: 46 additions & 0 deletions seatunnel-connectors-v2/connector-pinecone/pom.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements. See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<!-- Maven module for the SeaTunnel Pinecone source connector. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <!-- Inherits build configuration and the shared ${revision} version from the connectors-v2 parent. -->
    <parent>
        <groupId>org.apache.seatunnel</groupId>
        <artifactId>seatunnel-connectors-v2</artifactId>
        <version>${revision}</version>
    </parent>

    <artifactId>connector-pinecone</artifactId>
    <name>SeaTunnel : Connectors V2 : Pinecone</name>

    <dependencies>
        <!-- Official Pinecone Java client used to query indexes and fetch vectors. -->
        <dependency>
            <groupId>io.pinecone</groupId>
            <artifactId>pinecone-client</artifactId>
            <version>2.1.0</version>
        </dependency>
        <!-- JSON handling for converting Pinecone records to SeaTunnel rows.
             NOTE(review): version is pinned here rather than managed by the parent
             BOM — confirm it does not conflict with gson versions elsewhere. -->
        <dependency>
            <groupId>com.google.code.gson</groupId>
            <artifactId>gson</artifactId>
            <version>2.10.1</version>
        </dependency>
    </dependencies>


</project>
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
package org.apache.seatunnel.connectors.pinecone.config;

import org.apache.seatunnel.api.configuration.Option;
import org.apache.seatunnel.api.configuration.Options;

/**
 * Option definitions for the Pinecone source connector.
 *
 * <p>Pure constants holder — it is never instantiated; the private constructor
 * enforces that (utility-class convention).
 */
public final class PineconeSourceConfig {
    /** Plugin identity used in job configs and by the factory SPI. */
    public static final String CONNECTOR_IDENTITY = "Pinecone";

    /** API key used to authenticate against the Pinecone service. */
    public static final Option<String> API_KEY =
            Options.key("api_key")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Pinecone token for authentication");

    /** Name of the Pinecone index to read from. */
    public static final Option<String> INDEX =
            Options.key("index")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Pinecone index name");

    private PineconeSourceConfig() {
        // no instances: constants only
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
package org.apache.seatunnel.connectors.pinecone.exception;

import lombok.Getter;
import org.apache.seatunnel.common.exception.SeaTunnelErrorCode;

@Getter
public enum PineconeConnectionErrorCode implements SeaTunnelErrorCode {
    /** Raised when the table schema fetched from Pinecone is absent. */
    SOURCE_TABLE_SCHEMA_IS_NULL("PINECONE-01", "Source table schema is null"),
    /** Raised when reading vectors from a Pinecone index fails. */
    READ_DATA_FAIL("PINECONE-02", "Read data fail");

    /** Stable machine-readable error code (e.g. {@code PINECONE-01}). */
    private final String code;
    /** Human-readable summary of the failure. */
    private final String description;

    PineconeConnectionErrorCode(String errorCode, String errorDescription) {
        this.code = errorCode;
        this.description = errorDescription;
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
package org.apache.seatunnel.connectors.pinecone.exception;

import org.apache.seatunnel.common.exception.SeaTunnelErrorCode;
import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException;

/**
 * Runtime exception thrown by the Pinecone connector, carrying a
 * {@link SeaTunnelErrorCode} so failures surface with a stable code.
 */
public class PineconeConnectorException extends SeaTunnelRuntimeException {

    /**
     * Creates an exception for the given error code, preserving the original
     * cause for diagnostics.
     */
    public PineconeConnectorException(SeaTunnelErrorCode errorCode, Throwable cause) {
        super(errorCode, errorCode.getErrorMessage(), cause);
    }

    /** Creates an exception for the given error code with no underlying cause. */
    public PineconeConnectorException(SeaTunnelErrorCode errorCode) {
        super(errorCode, errorCode.getErrorMessage());
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
package org.apache.seatunnel.connectors.pinecone.source;

import org.apache.seatunnel.api.configuration.ReadonlyConfig;
import org.apache.seatunnel.api.source.*;
import org.apache.seatunnel.api.table.catalog.CatalogTable;
import org.apache.seatunnel.api.table.catalog.TablePath;
import org.apache.seatunnel.api.table.type.SeaTunnelRow;
import org.apache.seatunnel.connectors.pinecone.config.PineconeSourceConfig;
import org.apache.seatunnel.connectors.pinecone.utils.PineconeUtils;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * SeaTunnel batch source that reads vectors from Pinecone indexes.
 *
 * <p>Table metadata is resolved eagerly in the constructor via
 * {@link PineconeUtils}; splits are produced per table by the enumerator and
 * consumed by {@link PineconeSourceReader}.
 */
public class PineconeSource implements SeaTunnelSource<SeaTunnelRow, PineconeSourceSplit, PineconeSourceState>,
        SupportParallelism,
        SupportColumnProjection {
    private final ReadonlyConfig config;
    /** Catalog tables discovered from the configured Pinecone index, keyed by table path. */
    private final Map<TablePath, CatalogTable> sourceTables;

    public PineconeSource(ReadonlyConfig config) {
        this.config = config;
        PineconeUtils pineconeUtils = new PineconeUtils(config);
        this.sourceTables = pineconeUtils.getSourceTables();
    }

    /**
     * Get the boundedness of this source.
     *
     * <p>Reading a Pinecone index is a finite snapshot scan, so this is a
     * bounded (batch) source. Returning {@code null} here — as the original
     * code did — violates the {@code SeaTunnelSource} contract and causes an
     * NPE in the engine when it inspects the job mode.
     *
     * @return {@link Boundedness#BOUNDED}.
     */
    @Override
    public Boundedness getBoundedness() {
        return Boundedness.BOUNDED;
    }

    /**
     * Create source reader, used to produce data.
     *
     * @param readerContext reader context.
     * @return source reader.
     * @throws Exception when create reader failed.
     */
    @Override
    public SourceReader<SeaTunnelRow, PineconeSourceSplit> createReader(SourceReader.Context readerContext) throws Exception {
        return new PineconeSourceReader(readerContext, config, sourceTables);
    }

    @Override
    public List<CatalogTable> getProducedCatalogTables() {
        return new ArrayList<>(sourceTables.values());
    }

    /**
     * Create source split enumerator, used to generate splits. This method will be called only once
     * when start a source.
     *
     * <p>NOTE(review): the enumerator class name is spelled "Enumertor" (sic) at its
     * declaration site; keep the reference in sync if that class is ever renamed.
     *
     * @param enumeratorContext enumerator context.
     * @return source split enumerator.
     * @throws Exception when create enumerator failed.
     */
    @Override
    public SourceSplitEnumerator<PineconeSourceSplit, PineconeSourceState> createEnumerator(SourceSplitEnumerator.Context<PineconeSourceSplit> enumeratorContext) throws Exception {
        return new PineconeSourceSplitEnumertor(enumeratorContext, config, sourceTables, null);
    }

    /**
     * Create source split enumerator, used to generate splits. This method will be called when
     * restore from checkpoint.
     *
     * @param enumeratorContext enumerator context.
     * @param checkpointState checkpoint state.
     * @return source split enumerator.
     * @throws Exception when create enumerator failed.
     */
    @Override
    public SourceSplitEnumerator<PineconeSourceSplit, PineconeSourceState> restoreEnumerator(SourceSplitEnumerator.Context<PineconeSourceSplit> enumeratorContext, PineconeSourceState checkpointState) throws Exception {
        return new PineconeSourceSplitEnumertor(enumeratorContext, config, sourceTables, checkpointState);
    }

    /**
     * Returns a unique identifier among same factory interfaces.
     *
     * <p>For consistency, an identifier should be declared as one lower case word (e.g. {@code
     * kafka}). If multiple factories exist for different versions, a version should be appended
     * using "-" (e.g. {@code elasticsearch-7}).
     */
    @Override
    public String getPluginName() {
        return PineconeSourceConfig.CONNECTOR_IDENTITY;
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
package org.apache.seatunnel.connectors.pinecone.source;

import com.google.auto.service.AutoService;
import lombok.extern.slf4j.Slf4j;
import org.apache.seatunnel.api.configuration.util.OptionRule;
import org.apache.seatunnel.api.source.SeaTunnelSource;
import org.apache.seatunnel.api.source.SourceSplit;
import org.apache.seatunnel.api.table.connector.TableSource;
import org.apache.seatunnel.api.table.factory.Factory;
import org.apache.seatunnel.api.table.factory.TableSourceFactory;
import org.apache.seatunnel.api.table.factory.TableSourceFactoryContext;
import org.apache.seatunnel.connectors.pinecone.config.PineconeSourceConfig;

import java.io.Serializable;

@Slf4j
@AutoService(Factory.class)
public class PineconeSourceFactory implements TableSourceFactory {

    /**
     * Builds a lazily-instantiated {@link PineconeSource} from the factory context's options.
     */
    @Override
    public <T, SplitT extends SourceSplit, StateT extends Serializable>
            TableSource<T, SplitT, StateT> createSource(TableSourceFactoryContext context) {
        return () -> (SeaTunnelSource<T, SplitT, StateT>) new PineconeSource(context.getOptions());
    }

    /**
     * Declares the options this connector accepts so the engine can validate
     * job configs up front.
     *
     * <p>Fix: the original rule declared an empty {@code .optional()} and
     * omitted {@link PineconeSourceConfig#INDEX} entirely, so the declared
     * rule did not cover the options the source actually reads. INDEX is
     * added as optional to stay backward compatible with existing configs.
     */
    @Override
    public OptionRule optionRule() {
        return OptionRule.builder()
                .required(PineconeSourceConfig.API_KEY)
                .optional(PineconeSourceConfig.INDEX)
                .build();
    }

    @Override
    public Class<? extends SeaTunnelSource> getSourceClass() {
        return PineconeSource.class;
    }

    /** Identifier matching {@code seatunnel.source.Pinecone} in plugin-mapping.properties. */
    @Override
    public String factoryIdentifier() {
        return PineconeSourceConfig.CONNECTOR_IDENTITY;
    }
}
Loading

0 comments on commit ff89bf3

Please sign in to comment.