astonzhang commented on a change in pull request #10074: Add vocabulary and embedding URL: https://github.com/apache/incubator-mxnet/pull/10074#discussion_r174658117
########## File path: python/mxnet/gluon/text/vocab.py ########## @@ -0,0 +1,325 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# coding: utf-8 +# pylint: disable=consider-iterating-dictionary + +"""Vocabulary.""" +from __future__ import absolute_import +from __future__ import print_function + +import collections +from ... import nd + +from . import _constants as C +from . import embedding as ebd + + +class Vocabulary(object): + """Indexing and embedding assignment for text tokens. + + + Parameters + ---------- + counter : collections.Counter or None, default None + Counts text token frequencies in the text data. Its keys will be indexed according to + frequency thresholds such as `max_size` and `min_freq`. Keys of `counter`, + `unknown_token`, and values of `reserved_tokens` must be of the same hashable type. + Examples: str, int, and tuple. + max_size : None or int, default None + The maximum possible number of the most frequent tokens in the keys of `counter` that can be + indexed. Note that this argument does not count any token from `reserved_tokens`. 
Suppose + that there are different keys of `counter` whose frequencies are the same; if indexing all of + them will exceed this argument value, such keys will be indexed one by one according to + their __cmp__() order until the frequency threshold is met. If this argument is None or + larger than its largest possible value restricted by `counter` and `reserved_tokens`, this + argument has no effect. + min_freq : int, default 1 + The minimum frequency required for a token in the keys of `counter` to be indexed. + unknown_token : hashable object, default '<unk>' + The representation for any unknown token. In other words, any unknown token will be indexed + as the same representation. Keys of `counter`, `unknown_token`, and values of + `reserved_tokens` must be of the same hashable type. Examples: str, int, and tuple. + reserved_tokens : list of hashable objects or None, default None + A list of reserved tokens that will always be indexed, such as special symbols representing + padding, beginning of sentence, and end of sentence. It cannot contain `unknown_token`, or + duplicate reserved tokens. Keys of `counter`, `unknown_token`, and values of + `reserved_tokens` must be of the same hashable type. Examples: str, int, and tuple. + embedding : instance or list of instances of `embedding.TokenEmbedding`, default None + The embedding to be assigned to the indexed tokens. If a list of multiple embeddings is + provided, their embedding vectors will be concatenated for the same token. + + + Properties + ---------- + embedding : instance of :class:`~mxnet.gluon.text.embedding.TokenEmbedding` + The embedding of the indexed tokens. + idx_to_token : list of strs + A list of indexed tokens where the list indices and the token indices are aligned. + reserved_tokens : list of strs or None + A list of reserved tokens that will always be indexed. + token_to_idx : dict mapping str to int + A dict mapping each token to its index integer. 
+ unknown_token : hashable object + The representation for any unknown token. In other words, any unknown token will be indexed + as the same representation. + + + Examples + -------- + >>> fasttext = text.embedding.create('fasttext', file_name='wiki.simple.vec') + >>> text_data = " hello world \n hello nice world \n hi world \n" Review comment: resolved ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services