@@ -1,3 +0,0 @@
# Default ignored files
/shelf/
/workspace.xml
@@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="jdk" jdkName="Python 3.8 (base)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
@@ -1,24 +0,0 @@
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="11">
            <item index="0" class="java.lang.String" itemvalue="scikit-image" />
            <item index="1" class="java.lang.String" itemvalue="scipy" />
            <item index="2" class="java.lang.String" itemvalue="python" />
            <item index="3" class="java.lang.String" itemvalue="natsort" />
            <item index="4" class="java.lang.String" itemvalue="tensorboardx" />
            <item index="5" class="java.lang.String" itemvalue="pillow" />
            <item index="6" class="java.lang.String" itemvalue="sklearn" />
            <item index="7" class="java.lang.String" itemvalue="torch" />
            <item index="8" class="java.lang.String" itemvalue="numpy" />
            <item index="9" class="java.lang.String" itemvalue="torchvision" />
            <item index="10" class="java.lang.String" itemvalue="torchsummary" />
          </list>
        </value>
      </option>
    </inspection_tool>
  </profile>
</component>
@@ -1,6 +0,0 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
@@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (base)" project-jdk-type="Python SDK" />
</project>
@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/Wave-U-Net.iml" filepath="$PROJECT_DIR$/.idea/Wave-U-Net.iml" />
    </modules>
  </component>
</project>
@@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
  </component>
</project>
@@ -1,16 +0,0 @@
# This is a sample Python script.

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.


def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
@@ -1,28 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F


class ConvLayer(nn.Module):
    def __init__(self, input, output, kernel_size, stride, transpose=False):
        super(ConvLayer, self).__init__()
        self.input = input
        self.output = output
        self.kernel_size = kernel_size
        self.stride = stride
        self.transpose = transpose

        if self.transpose:
            self.conv = nn.ConvTranspose1d(input, output, kernel_size=self.kernel_size,
                                           stride=self.stride, padding=self.kernel_size - 1)
        else:
            self.conv = nn.Conv1d(input, output, kernel_size=self.kernel_size, stride=self.stride)

        # Normalize over groups of 8 channels each
        self.norm = nn.GroupNorm(output // 8, output)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm(x)
        # Apply the activation functionally (nn.ReLU is a module class, not a function)
        return F.relu(x)
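# A minimal sketch of how ConvLayer might be exercised on a dummy mono waveform batch.
# The layer sizes and input shape below are illustrative assumptions, not values taken
# from the project.
import torch
from model.Conv import ConvLayer

layer = ConvLayer(input=1, output=16, kernel_size=15, stride=1)
waveform = torch.randn(4, 1, 16384)   # (batch, channels, samples)
features = layer(waveform)
print(features.shape)                 # torch.Size([4, 16, 16370]); 16384 - 15 + 1 = 16370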
@@ -1,15 +0,0 @@
import torch
import torch.nn as nn
from model.Conv import ConvLayer
from model.Resample import Resample


class DownsamplingBlock(nn.Module):
    def __init__(self, stride, kernel_size, padding):
        super(DownsamplingBlock, self).__init__()
        self.stride = stride
        self.kernel_size = kernel_size
        self.padding = padding

    def forward(self, x):
        # Placeholder: identity pass-through until downsampling is implemented
        out = x
        return out
@@ -1,17 +0,0 @@
import torch.nn as nn
import torch.nn.functional as F


class Resample(nn.Module):
    def __init__(self, channels, kernel_size, stride, padding):
        super(Resample, self).__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def forward(self, x):
        # Placeholder: identity pass-through until resampling is implemented
        out = x
        return out
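# A rough sketch (assumption only) of what the placeholder forward above could do:
# resample along the time axis, e.g. linear interpolation for upsampling and average
# pooling for downsampling. The helper name and the factor are illustrative.
import torch
import torch.nn.functional as F

def resample_sketch(x, factor=2, upsample=True):
    # x: (batch, channels, time)
    if upsample:
        return F.interpolate(x, scale_factor=float(factor), mode="linear", align_corners=False)
    return F.avg_pool1d(x, kernel_size=factor, stride=factor)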
@@ -1,17 +0,0 @@
import torch
import torch.nn as nn
from model.Conv import ConvLayer
from model.Resample import Resample


class UpsamplingBlock(nn.Module):
    def __init__(self, stride, kernel_size, padding):
        super(UpsamplingBlock, self).__init__()
        self.stride = stride
        self.kernel_size = kernel_size
        self.padding = padding

    def forward(self, x):
        # Placeholder: identity pass-through until upsampling is implemented
        out = x
        return out
@@ -1,41 +0,0 @@
import numpy as np
import torch.nn as nn
import torch

# k = 3
#
# dconv1 = nn.Conv1d(1, 1, kernel_size=k, stride=1, padding=0, bias=False)
#
# dconv1.weight.data = torch.ones(1, 1, k)
#
# x = torch.ones(1, 1, 4)
#
# # print('=====dconv1=====')
# #
# # for name, l in dconv1.named_parameters():
# #     print('{}={}'.format(name, l.data))
#
# x3 = dconv1(x)
#
# class MyModule(nn.Module):
#     def __init__(self):
#         super(MyModule, self).__init__()
#         self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
#
#     def forward(self, x):
#         # ModuleList can act as an iterable, or be indexed using ints
#         for i, l in enumerate(self.linears):
#             x = self.linears[i // 2](x) + l(x)
#         return x
#
# x = np.random.randint(2, size=(1, 2, 3, 4))
#
# z = np.random.randint(2, size=(2, 2, 2))
# y = x[0, :, 1:3, 2:4]
# print(x)
# print(y.shape)
# print(y.dot(z).shape)

from py2neo import Graph, Node, Relationship

# Connect to the neo4j database: pass the server address, user name, and password
graph = Graph('http://localhost:7474', auth=("neo4j", "test"))
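# A small sketch (assumed usage) of the Node and Relationship classes imported above,
# once the Graph connection succeeds; the labels and properties are illustrative only.
alice = Node("Person", name="Alice")
bob = Node("Person", name="Bob")
graph.create(Relationship(alice, "KNOWS", bob))   # creates both nodes and the edge
print(graph.run("MATCH (p:Person) RETURN count(p) AS c").data())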
@@ -1,29 +0,0 @@
import torch
import torch.nn as nn
from model.Conv import ConvLayer
from model.Resample import Resample


class DownsamplingBlock(nn.Module):
    def __init__(self, stride, kernel_size):
        super(DownsamplingBlock, self).__init__()
        self.stride = stride
        self.kernel_size = kernel_size

    def forward(self, x):
        # Placeholder: identity pass-through
        out = x
        return out


class UpsamplingBlock(nn.Module):
    def __init__(self, stride, kernel_size, padding):
        super(UpsamplingBlock, self).__init__()
        self.stride = stride
        self.kernel_size = kernel_size
        self.padding = padding

    def forward(self, x):
        # Placeholder: identity pass-through
        out = x
        return out
@@ -1,74 +0,0 @@
import torch
import torchaudio
import matplotlib.pyplot as plt
import musdb
import os
import numpy as np
import glob
import librosa
import soundfile


def load(path, sr=22050, mono=True, mode="numpy", offset=0.0, duration=None):
    y, curr_sr = librosa.load(path, sr=sr, mono=mono, res_type='kaiser_fast', offset=offset, duration=duration)

    if len(y.shape) == 1:
        # Expand channel dimension
        y = y[np.newaxis, :]

    if mode == "pytorch":
        y = torch.tensor(y)

    return y, curr_sr


def write_wav(path, audio, sr):
    soundfile.write(path, audio.T, sr, "PCM_16")


def get_musdbhq(database_path):
    '''
    Retrieve audio file paths for the MUSDB HQ dataset.
    :param database_path: MUSDB HQ root directory
    :return: list with two entries (train and test); each entry is a list of samples,
             and each sample is a dict mapping stem names to their audio file paths
    '''
    subsets = list()

    for subset in ["train", "test"]:
        print("Loading " + subset + " set...")
        tracks = glob.glob(os.path.join(database_path, subset, "*"))
        samples = list()

        # Go through tracks
        for track_folder in sorted(tracks):
            # Collect the path of each stem for this track
            example = dict()
            for stem in ["mix", "bass", "drums", "other", "vocals"]:
                filename = stem if stem != "mix" else "mixture"
                audio_path = os.path.join(track_folder, filename + ".wav")
                example[stem] = audio_path

            # Add other instruments to form accompaniment, unless it was already written
            acc_path = os.path.join(track_folder, "accompaniment.wav")
            if not os.path.exists(acc_path):
                print("Writing accompaniment to " + track_folder)
                stem_audio = []
                for stem in ["bass", "drums", "other"]:
                    audio, sr = load(example[stem], sr=None, mono=False)
                    stem_audio.append(audio)
                acc_audio = np.clip(sum(stem_audio), -1.0, 1.0)
                write_wav(acc_path, acc_audio, sr)

            example["accompaniment"] = acc_path
            samples.append(example)

        subsets.append(samples)

    return subsets


path = "C:/Users/IAN/Desktop/Wave-U-Net/musdb18-hq/"
res = get_musdbhq(path)
print(res)
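# Illustrative consumption of the returned structure: a list with the train and test
# subsets, each a list of per-track dicts mapping stem names to audio file paths.
train_set, test_set = res
print(len(train_set), "train tracks,", len(test_set), "test tracks")
for stem, wav_path in train_set[0].items():
    print(stem, "->", wav_path)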