vandanavk commented on a change in pull request #13241: [MXNET-1210 ] Gluon 
Audio
URL: https://github.com/apache/incubator-mxnet/pull/13241#discussion_r234373209
 
 

 ##########
 File path: python/mxnet/gluon/contrib/data/audio/transforms.py
 ##########
 @@ -0,0 +1,208 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# coding: utf-8
+# pylint: disable= arguments-differ
+"Audio transforms."
+
# Audio transforms depend on librosa for feature extraction; it is an
# optional dependency, so degrade gracefully when it cannot be imported.
import warnings

import numpy as np

try:
    import librosa
except ImportError:
    # Bind the name so later `librosa`-availability checks see None
    # instead of raising NameError when the dependency is absent.
    librosa = None
    warnings.warn("gluon/contrib/data/audio/transforms.py : librosa dependency "
                  "could not be resolved or imported, could not provide some/all transform.")

from ..... import ndarray as nd
from ....block import Block
+
class MFCC(Block):
    """Extract Mel-frequency cepstral coefficients (MFCC) from an audio signal.

    More details:
    https://librosa.github.io/librosa/generated/librosa.feature.mfcc.html

    Parameters
    ----------
    **kwargs
        Keyword arguments forwarded verbatim to ``librosa.feature.mfcc``.
        Commonly used ones are:

        sr : int, default 22050
            Sampling rate of the input audio signal.
        n_mfcc : int, default 20
            Number of MFCCs to return.

    Inputs:
        - **x**: input tensor (samples, ) shape.

    Outputs:
        - **out**: output array is a scaled NDArray with (samples, ) shape.

    """

    def __init__(self, **kwargs):
        # Initialize the parent Block first so its internal state exists
        # before any attributes are assigned on the instance.
        super(MFCC, self).__init__()
        self.kwargs = kwargs

    def forward(self, x):
        # `librosa` may be unbound (the module-level import is wrapped in
        # try/except and may not have bound the name) or None; the original
        # `if not librosa` would raise NameError in that case. Guard with
        # EAFP so we warn and pass the input through unchanged instead.
        try:
            mfcc = librosa.feature.mfcc
        except (NameError, AttributeError):
            warnings.warn("Librosa dependency is not installed! Install that and retry")
            return x

        if isinstance(x, np.ndarray):
            y = x
        elif isinstance(x, nd.NDArray):
            y = x.asnumpy()
        else:
            warnings.warn("Input object is not numpy or NDArray... Cannot apply the transform: MFCC!")
            return x

        # Average the per-frame coefficients over time to obtain a single
        # fixed-size feature vector for the whole clip.
        audio_tmp = np.mean(mfcc(y=y, **self.kwargs).T, axis=0)
        return nd.array(audio_tmp)
+
+
class Scale(Block):
    """Scale an audio array by a constant factor.

    Intended to map raw integer samples into floating point numbers between
    -1.0 and 1.0 when the factor matches the sample resolution (bit depth);
    the default factor is ``2**31``.

    Parameters
    ----------
    scale_factor : float
        The factor to scale the input tensor by.


    Inputs:
        - **x**: input tensor (samples, ) shape.

    Outputs:
        - **out**: output array is a scaled NDArray with (samples, ) shape.

    Examples
    --------
    >>> scale = audio.transforms.Scale(scale_factor=2)
    >>> audio_samples = mx.nd.array([2,3,4])
    >>> scale(audio_samples)
    [1.  1.5 2. ]
    <NDArray 3 @cpu(0)>

    """

    def __init__(self, scale_factor=2**31):
        self.scale_factor = scale_factor
        super(Scale, self).__init__()

    def forward(self, x):
        # Divide first; numpy input is additionally converted to an NDArray
        # on the way out, anything else is returned scaled as-is.
        scaled = x / self.scale_factor
        if isinstance(x, np.ndarray):
            return nd.array(scaled)
        return scaled
+
+
class PadTrim(Block):
    """Pad or trim a 1-D NDArray or numpy array (signal or labels) to a fixed length.

    Parameters
    ----------
    max_len : int
        Length to which the array will be padded or trimmed.
    fill_value : int or float
        Value to pad at the end of the input array when padding is needed.


    Inputs:
        - **x**: input tensor (samples, ) shape.

    Outputs:
        - **out**: output array is a scaled NDArray with (max_len, ) shape.

    Examples
    --------
    >>> padtrim = audio.transforms.PadTrim(max_len=9, fill_value=0)
    >>> audio_samples = mx.nd.array([1,2,3,4,5])
    >>> padtrim(audio_samples)
    [1. 2. 3. 4. 5. 0. 0. 0. 0.]
    <NDArray 9 @cpu(0)>

    """

    def __init__(self, max_len, fill_value=0):
        self._max_len = max_len
        self._fill_value = fill_value
        super(PadTrim, self).__init__()

    def forward(self, x):
        # Normalise numpy input to an NDArray so one code path serves both.
        if isinstance(x, np.ndarray):
            x = nd.array(x)
        deficit = self._max_len - x.size
        if deficit > 0:
            # Too short: append `deficit` copies of the fill value.
            tail = nd.ones((deficit,)) * self._fill_value
            return nd.concat(x, tail, dim=0)
        if deficit < 0:
            # Too long: keep only the leading max_len samples.
            return x[:self._max_len]
        return x
+
+
+class MEL(Block):
+    """Create MEL Spectrograms from a raw audio signal. Relatively pretty slow.
+
+    Parameters
 
 Review comment:
   I think reST expects class attributes to be documented under the "Attributes" 
section, with "Parameters" reserved for function parameters. Please double-check this.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to