[ 
https://issues.apache.org/jira/browse/NIFI-1280?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15962167#comment-15962167
 ] 

ASF GitHub Bot commented on NIFI-1280:
--------------------------------------

Github user olegz commented on a diff in the pull request:

    https://github.com/apache/nifi/pull/1652#discussion_r110541128
  
    --- Diff: 
nifi-nar-bundles/nifi-registry-bundle/nifi-registry-service/src/main/java/org/apache/nifi/schemaregistry/services/AvroSchemaRegistry.java
 ---
    @@ -0,0 +1,217 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.nifi.schemaregistry.services;
    +
    +import java.util.ArrayList;
    +import java.util.HashMap;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.stream.Collectors;
    +
    +import org.apache.avro.LogicalType;
    +import org.apache.avro.Schema;
    +import org.apache.avro.Schema.Field;
    +import org.apache.avro.Schema.Type;
    +import org.apache.nifi.annotation.documentation.CapabilityDescription;
    +import org.apache.nifi.annotation.documentation.Tags;
    +import org.apache.nifi.annotation.lifecycle.OnDisabled;
    +import org.apache.nifi.annotation.lifecycle.OnEnabled;
    +import org.apache.nifi.components.PropertyDescriptor;
    +import org.apache.nifi.controller.AbstractControllerService;
    +import org.apache.nifi.controller.ConfigurationContext;
    +import org.apache.nifi.reporting.InitializationException;
    +import org.apache.nifi.serialization.SimpleRecordSchema;
    +import org.apache.nifi.serialization.record.DataType;
    +import org.apache.nifi.serialization.record.RecordField;
    +import org.apache.nifi.serialization.record.RecordFieldType;
    +import org.apache.nifi.serialization.record.RecordSchema;
    +
    +@Tags({ "schema", "registry", "avro", "json", "csv" })
    +@CapabilityDescription("Provides a service for registering and accessing 
schemas. You can register a schema "
    +        + "as a dynamic property where 'name' represents the schema name 
and 'value' represents the textual "
    +    + "representation of the actual schema following the syntax and 
semantics of Avro's Schema format.")
    +public class AvroSchemaRegistry extends AbstractControllerService 
implements SchemaRegistry {
    +
    +    private final Map<String, String> schemaNameToSchemaMap;
    +
    +    private static final String LOGICAL_TYPE_DATE = "date";
    +    private static final String LOGICAL_TYPE_TIME_MILLIS = "time-millis";
    +    private static final String LOGICAL_TYPE_TIME_MICROS = "time-micros";
    +    private static final String LOGICAL_TYPE_TIMESTAMP_MILLIS = 
"timestamp-millis";
    +    private static final String LOGICAL_TYPE_TIMESTAMP_MICROS = 
"timestamp-micros";
    +
    +
    +    public AvroSchemaRegistry() {
    +        this.schemaNameToSchemaMap = new HashMap<>();
    +    }
    +
    +    @OnEnabled
    +    public void enable(ConfigurationContext configurationContext) throws 
InitializationException {
    +        
this.schemaNameToSchemaMap.putAll(configurationContext.getProperties().entrySet().stream()
    +                .filter(propEntry -> propEntry.getKey().isDynamic())
    +                .collect(Collectors.toMap(propEntry -> 
propEntry.getKey().getName(), propEntry -> propEntry.getValue())));
    +    }
    +
    +    @Override
    +    public String retrieveSchemaText(String schemaName) {
    +        if (!this.schemaNameToSchemaMap.containsKey(schemaName)) {
    +            throw new IllegalArgumentException("Failed to find schema; 
Name: '" + schemaName + "'.");
    +        } else {
    +            return this.schemaNameToSchemaMap.get(schemaName);
    +        }
    +    }
    +
    +    @Override
    +    public String retrieveSchemaText(String schemaName, Map<String, 
String> attributes) {
    +        throw new UnsupportedOperationException("This version of schema 
registry does not "
    +                + "support this operation, since schemas are only 
identified by name.");
    --- End diff --
    
    Perhaps instead of throwing the exception we should just delegate to 
`retrieveSchemaText(String schemaName)` and log a warning that the provided 
attributes are ignored.


> Create QueryFlowFile Processor
> ------------------------------
>
>                 Key: NIFI-1280
>                 URL: https://issues.apache.org/jira/browse/NIFI-1280
>             Project: Apache NiFi
>          Issue Type: Task
>          Components: Extensions
>            Reporter: Mark Payne
>            Assignee: Mark Payne
>             Fix For: 1.2.0
>
>         Attachments: QueryFlowFile_Record_Reader-Writer_Examples.xml
>
>
> We should have a Processor that allows users to easily filter out specific 
> columns from CSV data. For instance, a user would configure two different 
> properties: "Columns of Interest" (a comma-separated list of column indexes) 
> and "Filtering Strategy" (Keep Only These Columns, Remove Only These Columns).
> We can do this today with ReplaceText, but it is far more difficult than it 
> would be with this Processor, as the user has to use Regular Expressions, 
> etc. with ReplaceText.
> Eventually a Custom UI could even be built that allows a user to upload a 
> Sample CSV and choose which columns from there, similar to the way that Excel 
> works when importing CSV by dragging and selecting the desired columns? That 
> would certainly be a larger undertaking and would not need to be done for an 
> initial implementation.



--
This message was sent by Atlassian JIRA
(v6.3.15#6346)

Reply via email to