Shally Verma(1234sv) replied on github web page:
include/odp/api/spec/comp.h
line 635
@@ -0,0 +1,873 @@
+/* Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Compression
+ */
+
+#ifndef ODP_API_COMP_H_
+#define ODP_API_COMP_H_
+
+#include <odp/visibility_begin.h>
+#include <odp/api/support.h>
+#include <odp/api/packet.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @defgroup odp_compression ODP COMP
+ * ODP Compression is an API set to do compression+hash or decompression+hash
+ * operations on data. Hash is calculated on plaintext.
+ *
+ * if opcode = ODP_COMP_OP_COMPRESS, then it will apply hash and then
+ * compress,
+ * if opcode = ODP_COMP_OP_DECOMPRESS, then it will decompress and then
+ * apply hash.
+ * Independent hash-only operations are not supported. Implementation should
+ * perform hash along with valid compression algo.
+ * Macros, enums, types and operations to utilize compression interface.
+ * @{
+ */
+
+/**
+ * @def ODP_COMP_SESSION_INVALID
+ * Invalid session handle
+ */
+
+/**
+ * @typedef odp_comp_session_t
+ * Compression/Decompression session handle
+ */
+
+/**
+ * Compression API operation mode
+ */
+typedef enum {
+ /** Synchronous, return results immediately */
+ ODP_COMP_SYNC,
+ /** Asynchronous, return results via event queue */
+ ODP_COMP_ASYNC
+} odp_comp_op_mode_t;
+
+/**
+ * Comp API operation type.
+ *
+ */
+typedef enum {
+ /** Compress */
+ ODP_COMP_OP_COMPRESS,
+ /** Decompress */
+ ODP_COMP_OP_DECOMPRESS
+} odp_comp_op_t;
+
+/**
+ * Comp API hash algorithm
+ *
+ */
+typedef enum {
+ /** ODP_COMP_HASH_ALG_NONE - No hash algorithm selected. */
+ ODP_COMP_HASH_ALG_NONE,
+ /** ODP_COMP_HASH_ALG_SHA1 - SHA-1 hash algorithm. */
+ ODP_COMP_HASH_ALG_SHA1,
+ /** ODP_COMP_HASH_ALG_SHA256 - SHA-2 hash algorithm
+ * 256-bit digest length.
+ */
+ ODP_COMP_HASH_ALG_SHA256
+} odp_comp_hash_alg_t;
+
+/**
+ * Comp API compression algorithm
+ *
+ */
+typedef enum {
+ /** No algorithm specified.
+ * Means no compression, output == input.
+ * If selected, no operation (compression/decompression or hash) is
+ * applied to the input. Added for testing purposes.
+ */
+ ODP_COMP_ALG_NULL,
+ /** DEFLATE - RFC1951 */
+ ODP_COMP_ALG_DEFLATE,
+ /** ZLIB - RFC1950 */
+ ODP_COMP_ALG_ZLIB,
+ /** LZS */
+ ODP_COMP_ALG_LZS
+} odp_comp_alg_t;
+
+/**
+ * Comp API session creation return code
+ *
+ */
+typedef enum {
+ /** Session created */
+ ODP_COMP_SES_CREATE_ERR_NONE,
+ /** Creation failed, no resources */
+ ODP_COMP_SES_CREATE_ERR_ENOMEM,
+ /** Creation failed, bad compression params */
+ ODP_COMP_SES_CREATE_ERR_INV_COMP,
+ /** Creation failed, bad hash params */
+ ODP_COMP_SES_CREATE_ERR_INV_HASH,
+ /** Creation failed, requested configuration not supported */
+ ODP_COMP_SES_CREATE_ERR_NOT_SUPPORTED
+} odp_comp_ses_create_err_t;
+
+/**
+ * Comp API operation return codes
+ *
+ */
+typedef enum {
+ /** Operation completed successfully*/
+ ODP_COMP_ERR_NONE,
+ /** Operation paused due to insufficient output buffer.
+ *
+ * This is not an error condition. On seeing this situation,
+ * Implementation should maintain context of in-progress operation and
+ * application should call packet processing API again with valid
+ * output buffer but no other alteration to operation params
+ * (odp_comp_packet_op_param_t).
+ *
+ * if using async mode, application should either make sure to
+ * provide sufficient output buffer size OR maintain relevant
+ * context (or ordering) information with respect to each input packet
+ * en-queued for processing.
+ *
+ */
+ ODP_COMP_ERR_OUT_OF_SPACE,
+ /** Invalid user data pointers */
+ ODP_COMP_ERR_DATA_PTR,
+ /** Invalid input data size */
+ ODP_COMP_ERR_DATA_SIZE,
+ /** Compression Algo failure */
+ ODP_COMP_ERR_ALGO_FAIL,
+ /** Error if operation has been requested in an invalid state */
+ ODP_COMP_ERR_INV_STATE,
+ /** Error if API does not support any of the operational parameter. */
+ ODP_COMP_ERR_NOT_SUPPORTED,
+ /** Error if session is invalid. */
+ ODP_COMP_ERR_INV_SESS
+} odp_comp_err_t;
+
+/**
+ * Comp API enumeration for preferred compression level/speed. Applicable
+ * only for compression operation not decompression.
+ * Value provided defines a trade-off between speed and compression ratio.
+ *
+ * If compression level == ODP_COMP_LEVEL_MIN, output will be produced at
+ * fastest possible rate,
+ *
+ * If compression level == ODP_COMP_LEVEL_MAX, output will be highest possible
+ * compression,
+ *
+ * compression level == ODP_COMP_LEVEL_DEFAULT means implementation will use
+ * its default choice of compression level.
+ *
+ */
+typedef enum {
+ /** Use implementation default */
+ ODP_COMP_LEVEL_DEFAULT = 0,
+ /** Minimum compression (fastest in speed) */
+ ODP_COMP_LEVEL_MIN,
+ /** Maximum compression (slowest in speed) */
+ ODP_COMP_LEVEL_MAX,
+} odp_comp_level_t;
+
+/**
+ * Comp API enumeration for huffman encoding. Valid for compression operation.
+ *
+ */
+typedef enum {
+ /** use implementation default to choose between compression codes */
+ ODP_COMP_HUFFMAN_CODE_DEFAULT = 0,
+ /** use fixed huffman codes */
+ ODP_COMP_HUFFMAN_CODE_FIXED,
+ /** use dynamic huffman coding */
+ ODP_COMP_HUFFMAN_CODE_DYNAMIC,
+} odp_comp_huffman_code_t;
+
+/**
+ * Hash algorithms in a bit field structure
+ *
+ */
+typedef union odp_comp_hash_algos_t {
+ /** hash algorithms */
+ struct {
+ /** ODP_COMP_HASH_ALG_SHA1 */
+ uint32_t sha1 : 1;
+
+ /** ODP_COMP_HASH_ALG_SHA256 */
+ uint32_t sha256 : 1;
+
+ } bit;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or bitwise
+ * operations over the entire structure.
+ */
+ uint32_t all_bits;
+} odp_comp_hash_algos_t;
+
+/**
+ * Comp algorithms in a bit field structure
+ *
+ */
+typedef union odp_comp_algos_t {
+ /** Compression algorithms */
+ struct {
+ /** ODP_COMP_ALG_NULL */
+ uint32_t null : 1;
+
+ /** ODP_COMP_ALG_DEFLATE */
+ uint32_t deflate : 1;
+
+ /** ODP_COMP_ALG_ZLIB */
+ uint32_t zlib : 1;
+
+ /** ODP_COMP_ALG_LZS */
+ uint32_t lzs :1;
+ } bit;
+
+ /** All bits of the bit field structure
+ * This field can be used to set/clear all flags, or bitwise
+ * operations over the entire structure.
+ */
+ uint32_t all_bits;
+} odp_comp_algos_t;
+
+/**
+ * Compression Interface Capabilities
+ *
+ */
+typedef struct odp_comp_capability_t {
+ /** Maximum number of sessions */
+ uint32_t max_sessions;
+
+ /** Supported compression algorithms */
+ odp_comp_algos_t comp_algos;
+
+ /** Supported hash algorithms. */
+ odp_comp_hash_algos_t hash_algos;
+
+ /** Support type for synchronous operation mode (ODP_COMP_SYNC).
+ * User should set odp_comp_session_param_t:mode based on
+ * support level as indicated by this param.
+ */
+ odp_support_t sync;
+
+ /** Support type for asynchronous operation mode (ODP_COMP_ASYNC).
+ * User should set odp_comp_session_param_t:mode param based on
+ * support level as indicated by this param.
+ */
+ odp_support_t async;
+} odp_comp_capability_t;
+
+/**
+ * Hash algorithm capabilities
+ *
+ */
+typedef struct odp_comp_hash_alg_capability_t {
+ /** Digest length in bytes */
+ uint32_t digest_len;
+} odp_comp_hash_alg_capability_t;
+
+/**
+ * Compression algorithm capabilities structure for each algorithm.
+ *
+ */
+typedef struct odp_comp_alg_capability_t {
+ /** Enumeration indicating algorithm support for dictionary load */
+ odp_support_t support_dict;
+
+ /** Optional Maximum length of dictionary supported
+ * by implementation of an algorithm.
+ *
+ * Invalid if support_dict == ODP_SUPPORT_NO.
+ *
+ * Implementation uses a dictionary of length less than or equal to the
+ * value indicated by dict_len. If set to 0 and if support_dict ==
+ * ODP_SUPPORT_YES, then implementation will use dictionary length
+ * less than or equal to user input length in odp_comp_set_dict()
+ * and update used dictionary length at output of the call.
+ *
+ */
+ uint32_t dict_len;
+
+ /** Maximum compression level supported by implementation of this algo.
+ * Indicates number of compression levels supported by implementation,
+ *
+ * where,
+ *
+ * 1 means fastest compression i.e. output produced at
+ * best possible speed at the expense of compression quality, and
+ *
+ * max_level means best compression i.e.output produced is best possible
+ * compressed content at the expense of speed.
+ *
+ * Example, if max_level = 4 , it means algorithm supports four levels
+ * of compression from value 1 up to 4. User can set this value from
+ * 1 (fastest compression) to 4 (best compression).
+ * See RFC1950 for an example explanation to level.
+ *
+ * Value 0 means the implementation uses its default value.
+ *
+ */
+ uint32_t max_level;
+
+ /** Supported hash algorithms */
+ odp_comp_hash_algos_t hash_algo;
+} odp_comp_alg_capability_t;
+
+/**
+ * Comp API dictionary type
+ * Consists of pointer to byte buffer. length of dictionary
+ * indicated by length parameter.
+ */
+typedef struct odp_comp_dict_t {
+ /** pointer to byte array */
+ uint8_t *buf;
+ /** length of the dictionary. */
+ uint32_t len;
+} odp_comp_dict_t;
+
+/**
+ * Comp API algorithm specific parameters
+ *
+ */
+typedef struct odp_comp_algo_param_t {
+ /** struct for defining deflate algorithm parameters.
+ * Also initialized by other deflate based algorithms , ex. ZLIB
+ */
+ struct comp_alg_def_param {
+ /** compression level where
+ * ODP_COMP_LEVEL_MIN <= level <= ODP_COMP_LEVEL_MAX
+ */
+ odp_comp_level_t level;
+ /** huffman code to use */
+ odp_comp_huffman_code_t comp_code;
+ } deflate;
+
+ /** struct for defining zlib algorithm parameters.
+ */
+ struct comp_alg_zlib_param {
+ /** deflate algo params */
+ struct comp_alg_def_param def;
+ } zlib;
+} odp_comp_alg_param_t;
+
+ /**
+ * Comp API session creation parameters
+ *
+ */
+typedef struct odp_comp_session_param_t {
+ /** Compress vs. Decompress operation */
+ odp_comp_op_t op;
+
+ /** Sync vs Async mode
+ *
+ * When mode = ODP_COMP_SYNC, odp_comp_xxx()
+ * should be called.
+ *
+ * When mode = ODP_COMP_ASYNC, odp_comp_xxx_enq()
+ * should be called.
+ *
+ * Use odp_comp_capability() for supported mode.
+ *
+ */
+ odp_comp_op_mode_t mode;
+
+ /** Compression algorithm
+ *
+ * Use odp_comp_capability() for supported algorithms.
+ */
+ odp_comp_alg_t comp_algo;
+
+ /** Hash algorithm
+ *
+ * Use odp_comp_alg_capability() for supported hash algorithms for the
+ * compression algorithm given as comp_algo. Implementation should not
+ * support hash-only operation on data. Output will always contain
+ * data + hash.
+ *
+ */
+ odp_comp_hash_alg_t hash_algo;
+
+ /** parameters specific to compression */
+ odp_comp_alg_param_t algo_param;
+
+ /** Async mode completion event queue
+ *
+ * When mode = ODP_COMP_ASYNC, user should wait on ODP_EVENT_PACKET
+ * with subtype ODP_EVENT_PACKET_COMP on this queue.
+ *
+ * By default, implementation enqueues completion events in order of
+ * request submission and thus the queue is considered ordered.
+ *
+ * Please note, behavior could be changed or enhanced
+ * to queue event in-order-of their completion to enable
+ * performance-oriented application to leverage hw offered parallelism.
+ * However, this will be subject to application requirement and more
+ * explicit defined use-case.
+ *
+ */
+ odp_queue_t compl_queue;
+} odp_comp_session_param_t;
+
+/**
+ * Comp API per packet operation result
+ *
+ */
+typedef struct odp_comp_packet_op_result_t {
+ /** Operation Return Code */
+ odp_comp_err_t err;
+
+ /** Output packet data range. Where,
+ *
+ * offset = base offset within packet, current data is written at.
+ *
+ * length = length of data written.
+ *
+ */
+ odp_packet_data_range_t out_data_range;
+} odp_comp_packet_op_result_t;
+
+/**
+ * Comp packet API per packet operation parameters
+ */
+typedef struct odp_comp_packet_op_param_t {
+ /** Session handle from creation */
+ odp_comp_session_t session;
+
+ /** Boolean indicating if current input is last chunk of data, where
+ *
+ * true : last chunk
+ *
+ * false: more to follow
+ *
+ * If set to true, indicates this is the last chunk of
+ * data. After processing of last chunk of data is complete i.e.
+ * call returned with any error code except ODP_COMP_ERR_OUT_OF_SPACE,
+ * implementation should move algorithm to stateless mode
+ * for next of batch of operation i.e. reset history,
+ * insert 'End of Block' marker into compressed data stream(if
+ * supported by algo).See deflate/zlib for interpretation of
+ * stateless/stateful.
+ *
+ * For stateless compressions (ex ipcomp), last should be set to 'true'
+ * for every input packet processing call.
+ *
+ * For compression + hash, digest will be available after
+ * last chunk is processed completely. In case of
+ * ODP_COMP_ERR_OUT_OF_SPACE, application should keep on calling
+ * odp_comp_xxx() API with more output buffer unless call returns
+ * with ODP_COMP_ERR_NONE or other failure code except
+ * ODP_COMP_ERR_OUT_OF_SPACE.
+ */
+ odp_bool_t last;
+
+ /** Input data range to process */
+ odp_packet_data_range_t in_data_range;
+
+ /** Output packet data range.
+ * Indicates where processed packet will be written
+ * where length specifies,
+ * length of available packet buffer at input, and
+ * length of data written at output
+ */
+ odp_packet_data_range_t out_data_range;
+} odp_comp_packet_op_param_t;
+
+/**
+ * Query comp capabilities
+ *
+ * Output comp capabilities on success.
+ *
+ * @param[out] capa Pointer to capability structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_comp_capability(odp_comp_capability_t *capa);
+
+/**
+ * Query supported compression algorithm capabilities
+ *
+ * Output algorithm capabilities.
+ *
+ * @param comp Compression algorithm
+ * @param[out] capa Array of capability structures for output
+ * @param num Maximum number of capability structures to output
+ *
+ * @return Number of capability structures for the algorithm. If this is larger
+ * than 'num', only 'num' first structures were output and application
+ * may call the function again with a larger value of 'num'.
+ * @retval <0 on failure
+ */
+int odp_comp_alg_capability(odp_comp_alg_t comp,
+ odp_comp_alg_capability_t capa[], int num);
+
+/**
+ * Query supported hash algorithm capabilities
+ *
+ * Outputs all supported configuration options for the algorithm.
+ *
+ * @param hash Hash algorithm
+ * @param[out] capa Array of capability structures for output
+ * @param num Maximum number of capability structures to output
+ *
+ * @return Number of capability structures for the algorithm. If this is larger
+ * than 'num', only 'num' first structures were output and application
+ * may call the function again with a larger value of 'num'.
+ * @retval <0 on failure
+ */
+int odp_comp_hash_alg_capability(odp_comp_hash_alg_t hash,
+ odp_comp_hash_alg_capability_t capa[],
+ int num);
+
+/**
+ * Initialize comp session parameters
+ *
+ * Initialize an odp_comp_session_param_t to its default values for
+ * all fields.
+ *
+ * @param param Pointer to odp_comp_session_param_t to be initialized
+ */
+void odp_comp_session_param_init(odp_comp_session_param_t *param);
+
+/**
+ * Compression session creation
+ *
+ * Create a comp session according to the session parameters. Use
+ * odp_comp_session_param_init() to initialize parameters into their
+ * default values.
+ *
+ * @param param Session parameters
+ * @param session Created session else ODP_COMP_SESSION_INVALID
+ * @param status Failure code if unsuccessful
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_comp_session_create(odp_comp_session_param_t *param,
+ odp_comp_session_t *session,
+ odp_comp_ses_create_err_t *status);
+
+/**
+ * Comp session destroy
+ *
+ * Destroy an unused session. Result is undefined if session is being used
+ * (i.e. asynchronous operation is in progress).
+ *
+ * @param session Session handle
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_comp_session_destroy(odp_comp_session_t session);
+
+/**
+ * Comp set dictionary
+ *
+ * Should be called when there is no operation in progress i.e.
+ * before initiating processing of first chunk of data and
+ * after processing of last chunk of data is complete.
+ *
+ * @param session Session handle
+ * @param[in,out] dict Pointer to dictionary.
+ * Implementation should update length of dictionary
+ * used at output.
+ * @retval 0 on success
+ * @retval <0 on failure
+ *
+ * @note:
+ * Application should call odp_comp_alg_capability() to query 'support_dict'
+ * before making call to this API.
+ */
+int odp_comp_set_dict(odp_comp_session_t session,
+ odp_comp_dict_t *dict);
+
+/**
+ * Comp API to process a packet synchronously
+ *
+ * If session is created in ODP_COMP_SYNC mode, this call waits for the
+ * operation to complete.
+ *
+ * If session is created in ODP_COMP_ASYNC mode, behavior is undefined.
+ *
+ * For compression + hash, call returns with hash appended at the end of
+ * packet which has last field set in corresponding param.
+ *
+ * User should compute processed data len = out_data_range.len - digest_len,
+ * where digest_len is queried through odp_comp_hash_alg_capability().
+ *
+ * For every input packet, user should pass corresponding valid output
+ * packet.
+ *
+ * For every successful or partially processed packet, the API updates the
+ * result param with out_data_range indicating the length of the data
+ * written in the output packet and an error code indicating the operation
+ * status.
+ *
+ * A packet is said to be partially processed if it runs out of buffer during
+ * processing and needs more buffer from user to continue. Under such
+ * condition, the API fails with the error code in the result param set to
+ * ODP_COMP_ERR_OUT_OF_SPACE.
+ *
+ * In case of ODP_COMP_ERR_OUT_OF_SPACE error, application should
+ * keep on calling odp_comp_op() API with more output buffer until call returns
+ * with success or any other error code.
+ *
+ * @param pkt_in Input Packet
+ * @param pkt_out Output Packet
+ * @param param Operation parameter
+ * @param[out] result Result parameter, containing after operation result.
+ *
+ * @return 0 on success
+ * @retval <0 on failure
+ */
+int odp_comp_op(const odp_packet_t pkt_in,
+ const odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *param,
+ odp_comp_packet_op_result_t *result);
+
Comment:
Does the partially processed packet contain valid data or not == does
application keep or discard the packet?
Shally > It will be a valid data and thus application is supposed to consume
it.
Is there any way for application to prepare large enough packet in advance?
Shally > Its more of application dependent such as ipcomp it knows packet
cannot go beyond a maximum length of IP Packet.
E.g. a per algo capability of worst case decompression ratio?
Shally > No. it is not guaranteed. Its more of data type property and algorithm
capability.
Should "pkt_out = INVALID" be supported?
Shally > No.
implementation allocates large enough packet.
Shally > we refrained from this approach as in such case implementation may end
up eating whole of user pool. Better approach is pass back what ever is
available at output. Now, application may consume it and reuse same packet for
next o/p OR extend the packet by some length (say double it up).
Single packet handle should be defined as const.
Shally > It is const in API param list ... odp_comp_op(const odp_packet_t
pkt_in....) .. what am i missing here?
There is out_data_range defined in both op_param_t and op_result_t. Which one
is updated on output?
Shally> One in op_result.
May be operation output comments should be removed from op_param_t
documentation.
Shally > Yea. Did that already.
Please mark your approval to these.
> Shally Verma(1234sv) wrote:
> ... with valid pkt_out and pkt_in being ODP_PACKET_INVALID...
> >> No. pkt_in too has to be valid and untouched. It is quite possible, that
> >> algorithm may or may not consume full input from input buffer when it hit
> >> out of buffer situation. Thus, none of the parameters except output packet
> >> should be updated by application until fully process and that's why
> >> description says " On seeing this situation,
> * Implementation should maintain context of in-progress operation and
> * application should call packet processing API again with valid
> * output buffer but no other alteration to operation params
> * (odp_comp_op_param_t)."
>> Shally Verma(1234sv) wrote:
>> default values are implementation dependent. There's no standard what they
>> could be set to. Example, one implementation may set default comp_alg to
>> zlib and other to deflate or possibly any other to zlib+hash. So, we cannot
>> document here what default values should be.
>>> Shally Verma(1234sv) wrote:
>>> Can this return more than one hash_alg_capa (== digest_len) per hash_alg?
>>> >>No. ideally, an implementation should return only 1 single capability per
>>> >>hash algorithm. I cannot think why a valid implementation would return
>>> >>more than 1 capability according to capability structure we have here.
>>> This API signature has been retained to keep consistency with other
>>> interface capability API.
>>>> Shally Verma(1234sv) wrote:
>>>> offset is not updated by spec as compression does not support
>>>> auto-allocation of packet and starts data write at given offset only. For
>>>> description, I rephrased it as below.
>>>> " /** Output packet data range.
>>>> *
>>>> * out_data_range.offset = offset in output packet data buffer to start
>>>> * write at,
>>>> *
>>>> * out_data_range.length = length of available space for output
>>>> * from given offset.
>>>> *
>>>> */
>>>>> Shally Verma(1234sv) wrote:
>>>>> Ok.
>>>>>> Shally Verma(1234sv) wrote:
>>>>>> "then we should document that the value is ignored."
>>>>>> >> Sorry, I didn't get that. You referring to which value to be ignored?
>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>> Did you refer to note starting here " * Please note, behavior could be
>>>>>>> changed or enhanced........"? if yes, okay I will remove that text.
>>>>>>> Please confirm.
>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>> accepted.
>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>> 1.comp_algo is always set. It should be e.g. ALG_NULL by default.
>>>>>>>>> >> I am bit confused. Are you suggesting adding a description to
>>>>>>>>> >> "comp ALG_NULL+hash"? We agreed in past that " user must use
>>>>>>>>> >> crypto for standalone hashing and compression algo NULL is only
>>>>>>>>> >> for testing purpose" and current description on usage of hash and
>>>>>>>>> >> ALG_NULL abide that. So, we can leave current description as is.
>>>>>>>>> 2. What this means: "output should always contain data + hash" ? If
>>>>>>>>> we enable hashing, hash is always calculated. It's not optional for
>>>>>>>>> implementation to calculate it - right.
>>>>>>>>> >> I will rephrase it to "output *will* always contain data + hash".
>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>> " This should be specified at operation documentation" means which
>>>>>>>>>> place? user guide Or ?
>>>>>>>>>>
>>>>>>>>>> Do you mean I should remove this description from here? Or
>>>>>>>>>> "+ * if opcode = ODP_COMP_COMPRESS, then it will apply hash and
>>>>>>>>>> then compress,
>>>>>>>>>> + * if opcode = ODP_COMP_DECOMPRESS, then it will decompress and
>>>>>>>>>> then apply
>>>>>>>>>> + * hash."
>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>> First, async call can reuse sync call spec by saying: "...
>>>>>>>>>>> processes packets otherwise identically to odp_comp_op(), but
>>>>>>>>>>> outputs resulting packets as ODP_EVENT_PACKET events ...". Then the
>>>>>>>>>>> remaining text can concentrate on what is unique on async vs sync
>>>>>>>>>>> operation.
>>>>>>>>>>>
>>>>>>>>>>> I think OUT_OF_SPACE spec might need re-thinking:
>>>>>>>>>>> 1) user sees a partial packet (error on a received packet)
>>>>>>>>>>> 2) implementation waits that user calls op_enq() again
>>>>>>>>>>> 3) what input packet should be given on the new call? I think the
>>>>>>>>>>> previous operation already consumed the input packet which contains
>>>>>>>>>>> the input data for operation. Did implementation store it?
>>>>>>>>>>> 4) how long implementation waits for new output packet to arrive
>>>>>>>>>>> from user, what if user don't send new output packets.
>>>>>>>>>>>
>>>>>>>>>>> Async spec (and also sync) spec would be more robust if
>>>>>>>>>>> implementation would allocate output packets.
>>>>>>>>>>>
>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>> Application does not allocate buffers but packets.
>>>>>>>>>>>>
>>>>>>>>>>>> Does the partially processed packet contain valid data or not ==
>>>>>>>>>>>> does application keep or discard the packet?
>>>>>>>>>>>>
>>>>>>>>>>>> Is there any way for application to prepare large enough packet in
>>>>>>>>>>>> advance? E.g. a per algo capability of worst case decompression
>>>>>>>>>>>> ratio? Should "pkt_out = INVALID" be supported? ==> implementation
>>>>>>>>>>>> allocates large enough packet.
>>>>>>>>>>>>
>>>>>>>>>>>> Single packet handle should **not** be defined as const.
>>>>>>>>>>>>
>>>>>>>>>>>> There is out_data_range defined in both op_param_t and
>>>>>>>>>>>> op_result_t. Which one is updated on output? May be operation
>>>>>>>>>>>> output comments should be removed from op_param_t documentation.
>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>> Simply say that:
>>>>>>>>>>>>> This operation does de/-compression in synchronous mode
>>>>>>>>>>>>> (ODP_COMP_SYNC).
>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>> "implementation should update length of dictionary used at
>>>>>>>>>>>>>> output."
>>>>>>>>>>>>>> This is too vague: either length is updated or it's not. In
>>>>>>>>>>>>>> which cases the update would be done and what that would mean?
>>>>>>>>>>>>>> Also, I assume that implementation copies the data, so that
>>>>>>>>>>>>>> application is free to overwrite the memory after the call. That
>>>>>>>>>>>>>> (copies data) should be mentioned also.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Is there a benefit on output param here, especially if it
>>>>>>>>>>>>>> updates only the length and not the pointer. Maybe API would be
>>>>>>>>>>>>>> cleaner, if also *dict is input only param (and thus const*).
>>>>>>>>>>>>>> Function may return e.g. number of bytes copied/used.
>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>> This can be simply
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> odp_comp_session_t odp_comp_session_create(const
>>>>>>>>>>>>>>> odp_comp_session_param_t *param)
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> Extra error codes may be returned through odp_errno(). Usually,
>>>>>>>>>>>>>>> we have not defined error codes, since it's implementation
>>>>>>>>>>>>>>> defines what all things can possibly go wrong, and typically
>>>>>>>>>>>>>>> application logic is just interested on success vs fail - not
>>>>>>>>>>>>>>> so much why we failed.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> This function prototype is copy-paste from crypto API, but all
>>>>>>>>>>>>>>> other APIs are the type I suggested above.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> Note: const pointer for "param"
>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>> It would be good to define default values for
>>>>>>>>>>>>>>>> odp_comp_session_param_t (at the type documentation).
>>>>>>>>>>>>>>>> Otherwise, application needs to always set every param.
>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>> Can this return more than one hash_alg_capa (== digest_len)
>>>>>>>>>>>>>>>>> per hash_alg? Currently, application cannot select digest
>>>>>>>>>>>>>>>>> length. So, what application should do if this returns more
>>>>>>>>>>>>>>>>> than one capa.
>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>> Is array of capabilities needed with comp algorithms? It
>>>>>>>>>>>>>>>>>> would be needed if user would need to select between
>>>>>>>>>>>>>>>>>> multiple discrete options - e.g. if only some dict_len
>>>>>>>>>>>>>>>>>> values would make sense (1kB, 4kB, 16kB), but nothing in
>>>>>>>>>>>>>>>>>> between.
>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>> "where length specifies,
>>>>>>>>>>>>>>>>>>> length of available packet buffer at input, and
>>>>>>>>>>>>>>>>>>> length of data written at output"
>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>> This specification should be clarified. "Input" refers
>>>>>>>>>>>>>>>>>>> to out_data_range.length as operation input param
>>>>>>>>>>>>>>>>>>> and "output" refers to the same as operation output
>>>>>>>>>>>>>>>>>>> parameter.
>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>> In addition to length, offset may be updated at operation
>>>>>>>>>>>>>>>>>>> output (at least when new packet was allocated). That
>>>>>>>>>>>>>>>>>>> should be mentioned also.
>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>> This last chuck:
>>>>>>>>>>>>>>>>>>>> "In case of
>>>>>>>>>>>>>>>>>>>> ODP_COMP_ERR_OUT_OF_SPACE, application should keep on
>>>>>>>>>>>>>>>>>>>> calling
>>>>>>>>>>>>>>>>>>>> odp_comp_xxx() API with more output buffer unless call
>>>>>>>>>>>>>>>>>>>> returns
>>>>>>>>>>>>>>>>>>>> with ODP_COMP_ERR_NONE or other failure code except
>>>>>>>>>>>>>>>>>>>> ODP_COMP_ERR_OUT_OF_SPACE. "
>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>> Documents operation return / error codes rather than
>>>>>>>>>>>>>>>>>>>> "last" field usage. So, I'd remove it from here.
>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>> If only option for a stateless algorithm is true, then we
>>>>>>>>>>>>>>>>>>>>> should document that the value is ignored.
>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>> Typo: "enques" -> enqueues"
>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>> Remove this block text since it's speculation of what
>>>>>>>>>>>>>>>>>>>>>>> could be specified.
>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>> Typo: "... given as comp_alg" -> "... given as
>>>>>>>>>>>>>>>>>>>>>>>> comp_algo"
>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>> There are two SHOULDs, but I think those are not
>>>>>>>>>>>>>>>>>>>>>>>> needed:
>>>>>>>>>>>>>>>>>>>>>>>> 1) comp_algo is always set. It should be e.g. ALG_NULL
>>>>>>>>>>>>>>>>>>>>>>>> by default. We have defined that NULL does not have
>>>>>>>>>>>>>>>>>>>>>>>> hash capability. It would be also possible to leave
>>>>>>>>>>>>>>>>>>>>>>>> NULL + hash to be implementation defined capability.
>>>>>>>>>>>>>>>>>>>>>>>> When we say that ALG_NULL is only for test, it should
>>>>>>>>>>>>>>>>>>>>>>>> be enough to discourage people from misusing this API
>>>>>>>>>>>>>>>>>>>>>>>> for hashing.
>>>>>>>>>>>>>>>>>>>>>>>> 2) What this means: "output should always contain data
>>>>>>>>>>>>>>>>>>>>>>>> + hash" ? If we enable hashing, hash is always
>>>>>>>>>>>>>>>>>>>>>>>> calculated. It's not optional for implementation to
>>>>>>>>>>>>>>>>>>>>>>>> calculate it - right.
>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>> Field name should match the type == "hash_algos"
>>>>>>>>>>>>>>>>>>>>>>>>> (many algorithms in a bit field). Underneath
>>>>>>>>>>>>>>>>>>>>>>>>> "hash_algo" is used with enum odp_comp_hash_alg_t
>>>>>>>>>>>>>>>>>>>>>>>>> (one algo).
>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>> I did comment the same a month back:
>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>> Is the data input only? If it is, then this should
>>>>>>>>>>>>>>>>>>>>>>>>>> be "const uint8_t *buf"
>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>> What is expected about the format of the data? Null
>>>>>>>>>>>>>>>>>>>>>>>>>> terminated strings? If it's strings, then e.g.
>>>>>>>>>>>>>>>>>>>>>>>>>> "const char *str" would be better.
>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>> Also the type documentation should be updated:
>>>>>>>>>>>>>>>>>>>>>>>>>> * This is not dictionary type, but pointer into
>>>>>>>>>>>>>>>>>>>>>>>>>> dictionary data - right?
>>>>>>>>>>>>>>>>>>>>>>>>>> * "Consists of pointer to byte buffer. length of
>>>>>>>>>>>>>>>>>>>>>>>>>> dictionary indicated by length parameter. ", this
>>>>>>>>>>>>>>>>>>>>>>>>>> needs clarification. What is expected about the
>>>>>>>>>>>>>>>>>>>>>>>>>> data behind the pointer?
>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>> @psavol Hmm. I thought that the user should use
>>>>>>>>>>>>>>>>>>>>>>>>>>> this value. And `odp_comp_level_t` should be
>>>>>>>>>>>>>>>>>>>>>>>>>>> dropped from the spec.
>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>> How does this relate to the odp_comp_level_t
>>>>>>>>>>>>>>>>>>>>>>>>>>>> enumeration? Shouldn't the user use those levels
>>>>>>>>>>>>>>>>>>>>>>>>>>>> and not a number from 0 to this one?
>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>> This is description of how de/-compression
>>>>>>>>>>>>>>>>>>>>>>>>>>>>> operation work. This should be specified at
>>>>>>>>>>>>>>>>>>>>>>>>>>>>> operation documentation, and optimally only
>>>>>>>>>>>>>>>>>>>>>>>>>>>>> there. Otherwise, we are in danger of this spec
>>>>>>>>>>>>>>>>>>>>>>>>>>>>> getting deprecated (== in conflict with operation
>>>>>>>>>>>>>>>>>>>>>>>>>>>>> documentation) without noticing.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> True, all types need to be odp_ prefixed. Also
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> this type would be better to typedef and pull
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> out (from inside of algo_param_t) since it's
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> used in two places already.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> As a side note: it might be logical, we might
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> want to add `odp_support_t` field to notify
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> user if implementation supports stateful
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> compression. I can easily assume that there
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> might be implementations implementing only
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> stateless compression.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_comp_alg_deflate_param, please.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Please check the see list. It mentions
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> non-existing functions.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> no result here
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> s/Else/Otherwise/
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ... an error....
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ... with valid pkt_out and pkt_in being
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ODP_PACKET_INVALID...
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> It is an error to call this API when
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> the session was created in
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ODP_COMP_ASYNC mode.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> As said, single and multi versions are
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> possible, but not as proposed in this
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> patch. Both versions should have the
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> same param structure and specification.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Only difference would be pkt_in[] vs
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> pkt_in, pkt_out[] vs pkt_out, param[]
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> vs. param ...
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Then I propose to add a new API like
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_comp_compress_multi(const
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_packet_t pkt_in[], odp_packet_t
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> pkt_out[], const
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_comp_compress_param_t param[], int
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> num_pkt);
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> which takes care of handling multiple
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> packets, and retain the current
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> version for single-packet handling.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Thanks
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> @bala-manoharan
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Results for both sync and async
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> through result function:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> int
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_comp_result(odp_comp_packet_result_t
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> *result, odp_packet_t packet);
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> OR have the results as output
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> parameter. Results function is needed
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> any way for the async operation.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Shally Verma(1234sv) wrote:
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> int odp_comp_compress(const
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_packet_t pkt_in[], odp_packet_t
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> pkt_out[], const
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_comp_compress_param_t param[],
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> int num_pkt);
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> A single-packet version may also be
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> added, but is not necessary, since
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> a couple of additional if-clauses
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> do not add significant overhead
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> compared to the CPU cycles of the
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> entire comp operation.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> int
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_comp_compress_single(odp_packet_t
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> pkt_in, odp_packet_t pkt_out, const
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> odp_comp_compress_param_t param);
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> The two prototypes would be the same
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> except [] and num_pkt.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
https://github.com/Linaro/odp/pull/156#discussion_r137745423
updated_at 2017-09-08 09:18:52