How to convert a .wav file to a spectrogram in Python 3

Sre*_*i R 14 python audio numpy matplotlib spectrogram

I am trying to create a spectrogram from a .wav file in Python 3.

I want the final saved image to look similar to this image:

I have tried the following:

This Stack Overflow post: Spectrogram of a wave file

That post sort of worked. After running it, I got the following:

However, this graph does not contain the colors that I need. I need a spectrogram that has colors. I tried to tinker with this code to add the colors, but after spending a lot of time and effort on it, I couldn't figure it out!

I then tried this tutorial.

This code crashed (on line 17) when I tried running it, with the error: TypeError: 'numpy.float64' object cannot be interpreted as an integer.

Line 17:

samples = np.append(np.zeros(np.floor(frameSize/2.0)), sig)

I tried to fix it by casting:

samples = int(np.append(np.zeros(np.floor(frameSize/2.0)), sig))

and I also tried:

samples = np.append(np.zeros(int(np.floor(frameSize/2.0)), sig))    

However, neither of these worked in the end.

I would really like to know how to convert my .wav files into spectrograms with color so that I can analyze them! Any help would be appreciated!

Please let me know if you would like me to give more information about my Python version, what I have tried, or what I am trying to achieve.

Tom*_*lie 30

Use scipy.signal.spectrogram.

import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile

sample_rate, samples = wavfile.read('path-to-mono-audio-file.wav')
frequencies, times, spectrogram = signal.spectrogram(samples, sample_rate)

plt.pcolormesh(times, frequencies, spectrogram)
plt.imshow(spectrogram)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.show()

EDIT: Putting plt.pcolormesh before plt.imshow seems to fix some issues, as pointed out by @Davidjb.

Please make sure your wav file is mono (single channel) and not stereo (dual channel) before attempting this. I highly recommend reading the scipy documentation at https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.spectrogram.html.
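For a stereo file, a minimal sketch (not part of the original answer: it combines the code above with the mono and log-scale tips from the comments below; the filename is a placeholder) might look like this:

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile

sample_rate, samples = wavfile.read('path-to-audio-file.wav')  # placeholder path

# down-mix a stereo file (shape (n, 2)) to mono by averaging the two channels
if samples.ndim > 1:
    samples = samples.mean(axis=1)

frequencies, times, spectrogram = signal.spectrogram(samples, sample_rate)

# plot on a dB scale so quieter content stays visible;
# the small constant avoids log(0) in empty bins
plt.pcolormesh(times, frequencies, 10 * np.log10(spectrogram + 1e-12), shading='auto')
plt.colorbar(label='Power [dB]')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.show()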

  • For me this just produced a blank graph. I moved `plt.imshow(spectrogram)` to after `plt.pcolormesh(...)` and then it worked. Any idea why? (6 upvotes)
  • If you can't resolve this, try two things: (1) remove `plt.imshow(..)` and (2) try `plt.pcolormesh` on `np.log(spectrogram)`. (6 upvotes)
  • @MartinThoma, I ran into the same problem; my issue was that I was using a stereo wav file instead of mono. (4 upvotes)
  • I get `ValueError: too many values to unpack (expected 2)` from `plt.pcolormesh` (2 upvotes)

Beg*_*ner 7

I have fixed the errors you were facing in the code from http://www.frank-zalkow.de/en/code-snippets/create-audio-spectrograms-with-python.html
This implementation is better because you can change the binsize (e.g. binsize=2**8); see the usage note after the code.

import numpy as np
from matplotlib import pyplot as plt
import scipy.io.wavfile as wav
from numpy.lib import stride_tricks

""" short time fourier transform of audio signal """
def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))

    # zeros at beginning (thus center of 1st window should be for sample nr. 0)   
    samples = np.append(np.zeros(int(np.floor(frameSize/2.0))), sig)    
    # cols for windowing
    cols = np.ceil( (len(samples) - frameSize) / float(hopSize)) + 1
    # zeros at end (thus samples can be fully covered by frames)
    samples = np.append(samples, np.zeros(frameSize))

    frames = stride_tricks.as_strided(samples, shape=(int(cols), frameSize), strides=(samples.strides[0]*hopSize, samples.strides[0])).copy()
    frames *= win

    return np.fft.rfft(frames)    

""" scale frequency axis logarithmically """    
def logscale_spec(spec, sr=44100, factor=20.):
    timebins, freqbins = np.shape(spec)

    scale = np.linspace(0, 1, freqbins) ** factor
    scale *= (freqbins-1)/max(scale)
    scale = np.unique(np.round(scale))

    # create spectrogram with new freq bins
    newspec = np.complex128(np.zeros([timebins, len(scale)]))
    for i in range(0, len(scale)):        
        if i == len(scale)-1:
            newspec[:,i] = np.sum(spec[:,int(scale[i]):], axis=1)
        else:        
            newspec[:,i] = np.sum(spec[:,int(scale[i]):int(scale[i+1])], axis=1)

    # list center freq of bins
    allfreqs = np.abs(np.fft.fftfreq(freqbins*2, 1./sr)[:freqbins+1])
    freqs = []
    for i in range(0, len(scale)):
        if i == len(scale)-1:
            freqs += [np.mean(allfreqs[int(scale[i]):])]
        else:
            freqs += [np.mean(allfreqs[int(scale[i]):int(scale[i+1])])]

    return newspec, freqs

""" plot spectrogram"""
def plotstft(audiopath, binsize=2**10, plotpath=None, colormap="jet"):
    samplerate, samples = wav.read(audiopath)

    s = stft(samples, binsize)

    sshow, freq = logscale_spec(s, factor=1.0, sr=samplerate)

    ims = 20.*np.log10(np.abs(sshow)/10e-6) # amplitude to decibel

    timebins, freqbins = np.shape(ims)

    print("timebins: ", timebins)
    print("freqbins: ", freqbins)

    plt.figure(figsize=(15, 7.5))
    plt.imshow(np.transpose(ims), origin="lower", aspect="auto", cmap=colormap, interpolation="none")
    plt.colorbar()

    plt.xlabel("time (s)")
    plt.ylabel("frequency (hz)")
    plt.xlim([0, timebins-1])
    plt.ylim([0, freqbins])

    xlocs = np.float32(np.linspace(0, timebins-1, 5))
    plt.xticks(xlocs, ["%.02f" % l for l in ((xlocs*len(samples)/timebins)+(0.5*binsize))/samplerate])
    ylocs = np.int16(np.round(np.linspace(0, freqbins-1, 10)))
    plt.yticks(ylocs, ["%.02f" % freq[i] for i in ylocs])

    if plotpath:
        plt.savefig(plotpath, bbox_inches="tight")
    else:
        plt.show()

    plt.clf()

    return ims

ims = plotstft(filepath)  # filepath: path to the .wav file to analyse
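For example, a usage sketch (the path is a placeholder for your own mono .wav file) showing the binsize and plotpath arguments:

ims = plotstft('path-to-mono-audio-file.wav', binsize=2**8)                      # show on screen
ims = plotstft('path-to-mono-audio-file.wav', plotpath='spectrogram.png')        # or save to a file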


小智 6

import os
import wave

import pylab

def graph_spectrogram(wav_file):
    """Plot the spectrogram of a wav file and save it as spectrogram.png."""
    sound_info, frame_rate = get_wav_info(wav_file)
    pylab.figure(num=None, figsize=(19, 12))
    pylab.subplot(111)
    pylab.title('spectrogram of %r' % wav_file)
    pylab.specgram(sound_info, Fs=frame_rate)
    pylab.savefig('spectrogram.png')

def get_wav_info(wav_file):
    """Read a mono, 16-bit PCM wav file and return (samples, frame rate)."""
    wav = wave.open(wav_file, 'r')
    frames = wav.readframes(-1)
    sound_info = pylab.frombuffer(frames, 'int16')  # fromstring is deprecated for binary data
    frame_rate = wav.getframerate()
    wav.close()
    return sound_info, frame_rate

A Capella Science - Bohemian Gravity! This gave:

[resulting spectrogram image]

Use graph_spectrogram(path_to_your_wav_file). I don't remember which blog I took this snippet from; I will add the link whenever I see it again.
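A minimal usage sketch (the path is a placeholder; get_wav_info assumes a mono, 16-bit PCM file):

graph_spectrogram('path-to-mono-audio-file.wav')  # writes spectrogram.png to the working directory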

  • Could you add some comments explaining the picture? What are the axes? What do the colors mean? (3 upvotes)