语音获取
要想发送语音信息,首先得获取语音。这里有几种方法:一种是使用 DirectX 的 DirectSound 来录音;我为了简便,使用一个开源的库 NAudio 来实现语音录取。在项目中引用 NAudio.dll。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
|
//------------------ Recording ------------------------------

// Active capture device; created on demand by createwaveindevice().
private IWaveIn wavein;
// WAV writer for the file currently being recorded; non-null only while recording.
private WaveFileWriter writer;

/// <summary>
/// Fills the device combo box with every active audio capture endpoint
/// (microphones), showing each device's friendly name.
/// </summary>
/// <remarks>
/// NOTE(review): casing of project identifiers (combobox1, ...) was lost in
/// extraction — restore against the original designer file.
/// </remarks>
private void loadwasapidevicescombo()
{
    var deviceEnum = new MMDeviceEnumerator();
    var devices = deviceEnum.EnumerateAudioEndpoints(DataFlow.Capture, DeviceState.Active).ToList();
    combobox1.DataSource = devices;
    // DisplayMember is resolved by reflection, so it must exactly match
    // NAudio's MMDevice.FriendlyName property name.
    combobox1.DisplayMember = "FriendlyName";
}
/// <summary>
/// Creates the capture device at 8 kHz mono (telephone quality — keeps the
/// recorded payload small enough to send over UDP) and hooks the buffer and
/// stop events.
/// </summary>
private void createwaveindevice()
{
    wavein = new WaveIn();
    wavein.WaveFormat = new WaveFormat(8000, 1);
    wavein.DataAvailable += ondataavailable;
    wavein.RecordingStopped += onrecordingstopped;
}
/// <summary>
/// Receives captured audio buffers from NAudio. Re-enters on the UI thread,
/// appends the buffer to the WAV file, updates the elapsed-seconds label, and
/// stops automatically once 10 seconds have been recorded.
/// </summary>
void ondataavailable(object sender, WaveInEventArgs e)
{
    if (this.InvokeRequired)
    {
        // WaveIn raises DataAvailable on a background thread; marshal back to
        // the UI thread so the label update below is safe.
        this.BeginInvoke(new EventHandler<WaveInEventArgs>(ondataavailable), sender, e);
    }
    else
    {
        writer.Write(e.Buffer, 0, e.BytesRecorded);
        // Elapsed time = bytes written so far / bytes per second of the format.
        int secondsRecorded = (int)(writer.Length / writer.WaveFormat.AverageBytesPerSecond);
        if (secondsRecorded >= 10) // cap recordings at 10 seconds
        {
            stoprecord();
        }
        else
        {
            l_sound.Text = secondsRecorded + " s";
        }
    }
}
/// <summary>
/// Raised by NAudio when capture has fully stopped; finalizes (flushes and
/// closes) the WAV file on the UI thread.
/// </summary>
void onrecordingstopped(object sender, StoppedEventArgs e)
{
    if (InvokeRequired)
    {
        // Event arrives on a background thread — re-enter on the UI thread.
        BeginInvoke(new EventHandler<StoppedEventArgs>(onrecordingstopped), sender, e);
    }
    else
    {
        finalizewavefile();
    }
}
/// <summary>
/// Stops recording and restores the button states (record/send/play enabled,
/// stop disabled). The WAV file itself is finalized later, when NAudio raises
/// RecordingStopped.
/// </summary>
void stoprecord()
{
    // allchangebtn is a helper defined elsewhere in this form (presumably a
    // cross-thread-safe enable/disable) — confirm its casing in the full source.
    allchangebtn(btn_luyin, true);
    allchangebtn(btn_stop, false);
    allchangebtn(btn_sendsound, true);
    allchangebtn(btn_play, true);

    if (wavein != null)
        wavein.StopRecording();
}
/// <summary>
/// Releases the capture device and finalizes any half-written WAV file.
/// Safe to call when nothing is recording.
/// </summary>
private void cleanup()
{
    if (wavein != null)
    {
        wavein.Dispose();
        wavein = null; // recreated on the next recording via createwaveindevice()
    }
    finalizewavefile();
}
/// <summary>
/// Closes the WAV writer, which flushes buffers and patches the RIFF header
/// with the final data length. Idempotent: does nothing if no file is open.
/// </summary>
private void finalizewavefile()
{
    if (writer != null)
    {
        writer.Dispose();
        writer = null;
    }
}
// Start recording (录音 button handler).
private void btn_luyin_click(object sender, EventArgs e)
{
    btn_stop.Enabled = true;
    btn_luyin.Enabled = false;
    if (wavein == null)
    {
        createwaveindevice();
    }
    // Overwrite any previous take so each recording starts from a clean file.
    // NOTE(review): soundfile is declared elsewhere in the form (the article
    // says it is p2psound_a.wav) — confirm name and casing in the full source.
    if (File.Exists(soundfile))
    {
        File.Delete(soundfile);
    }
    writer = new WaveFileWriter(soundfile, wavein.WaveFormat);
    wavein.StartRecording();
}
|
上面的代码实现了录音,并且写入文件p2psound_a.wav
语音发送
获取到语音后我们要把语音发送出去
当我们录好音后点击发送,这部分相关代码是
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
|
// Message translator from the author's "netframe" UDP library; created in the
// constructor and used for both sending and receiving.
// NOTE(review): netframe identifiers (msgtranslator, seiclient, udpthread,
// config, ...) kept as extracted — restore their original casing against
// that library.
msgtranslator tran = null;

/// <summary>
/// Builds the form: populates the capture-device list, then starts a UDP
/// listener on port 7777 and wires the message/debug events.
/// </summary>
public form1()   // was "ublic form1()" — dropped character restored
{
    initializecomponent();
    loadwasapidevicescombo(); // show available audio capture devices

    config cfg = seiclient.getdefaultconfig();
    cfg.port = 7777;
    udpthread udp = new udpthread(cfg);
    tran = new msgtranslator(udp, cfg);
    tran.messagereceived += tran_messagereceived;
    tran.debuged += new EventHandler<debugeventargs>(tran_debuged);
}
// Send the recorded voice clip to the peer at the IP/port typed into the UI.
private void btn_sendsound_click(object sender, EventArgs e)
{
    // Validate the two required fields before parsing.
    if (t_ip.Text == "")
    {
        MessageBox.Show("请输入ip");
        return;
    }
    if (t_port.Text == "")
    {
        MessageBox.Show("请输入端口号");
        return;
    }
    string ip = t_ip.Text;
    int port = int.Parse(t_port.Text);
    string nick = t_nick.Text;
    string msgText = "语音消息";

    IPEndPoint remote = new IPEndPoint(IPAddress.Parse(ip), port);
    // netframe message: the WAV bytes ride in the "extend" payload, and the
    // binary type flag tells the receiver to save them to a file.
    msg m = new msg(remote, "zz", nick, commands.sendmsg, msgText, "come from a");
    m.isrequirereceive = true;                  // request an acknowledgement
    m.extendmessagebytes = filecontent(soundfile);
    m.packageno = msg.getrandomnumber();        // random id to match ack/dedupe
    m.type = consts.message_binary;
    tran.send(m);
}
/// <summary>
/// Reads a file fully into memory.
/// </summary>
/// <param name="filename">Path of the file to read.</param>
/// <returns>The file's bytes, or null if the file could not be read.</returns>
private byte[] filecontent(string filename)
{
    try
    {
        // File.ReadAllBytes opens, fully reads, and closes the file. The
        // original single Stream.Read call was not guaranteed to fill the
        // buffer — ReadAllBytes always returns the complete content.
        return File.ReadAllBytes(filename);
    }
    catch (Exception)
    {
        // Preserve the original contract: null signals "could not read".
        // (Callers should null-check before using the result.)
        return null;
    }
}
|
如此一来我们就把产生的语音文件发送出去了
语音的接收与播放
其实语音的接收和文本消息的接收没有什么不同,只不过语音发送的时候是以二进制发送的,因此我们在收到语音后 就应该写入到一个文件里面去,接收完成后,播放这段语音就行了。
下面这段代码主要是把收到的数据保存到文件中去。这个函数是我的 netframe 里收到消息时所触发的事件,在文章前面提过的那篇文章里有介绍。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
|
/// <summary>
/// Handles an inbound netframe message. Binary messages carry a voice clip in
/// extendmessagebytes and are written to recive_soundfile for later playback;
/// text messages are only appended to the log.
/// </summary>
void tran_messagereceived(object sender, messageeventargs e)
{
    msg m = e.msg;
    if (m.type == consts.message_binary)
    {
        addservermessage(m.type + "->" + m.username + "发来二进制消息!");
        // FileMode.Create truncates/overwrites an existing file, so the old
        // File.Exists/Delete dance is unnecessary. The using block guarantees
        // the stream is closed even if Write throws (the original leaked it
        // on exception).
        using (FileStream fs = new FileStream(recive_soundfile, FileMode.Create, FileAccess.Write))
        {
            fs.Write(m.extendmessagebytes, 0, m.extendmessagebytes.Length);
        }
        changebtn(true); // enable playback now that the clip is on disk
    }
    else
    {
        addservermessage(m.type + "->" + m.username + "说:" + m.normalmsg);
    }
}
|
收到语音消息后,我们要进行播放,播放时仍然用刚才那个插件播放
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
|
//-------- Playback ----------

// Output device and the stream feeding it; both are torn down before each
// play so repeated plays always restart from the beginning of the file.
private IWavePlayer waveplayer;
private WaveStream reader;

/// <summary>
/// Plays a sound file from the start, disposing any previous player/reader
/// first.
/// </summary>
/// <param name="filename">Path of the audio file to play.</param>
public void play_sound(string filename)
{
    if (waveplayer != null)
    {
        waveplayer.Dispose();
        waveplayer = null;
    }
    if (reader != null)
    {
        reader.Dispose();
    }
    // MediaFoundationReader decodes wav/mp3/aac/etc. via Media Foundation;
    // SingleReaderObject reuses one source-reader object for all reads.
    reader = new MediaFoundationReader(filename,
        new MediaFoundationReader.MediaFoundationReaderSettings() { SingleReaderObject = true });
    // waveplayer is always null at this point (disposed above), so the
    // original "if (waveplayer == null)" guard was dead and is removed.
    waveplayer = new WaveOut();
    waveplayer.PlaybackStopped += waveplayeronplaybackstopped;
    waveplayer.Init(reader);
    waveplayer.Play();
}
/// <summary>
/// Playback finished or failed: surface any decoder/device error, stop the
/// output device, and re-enable the record button.
/// </summary>
private void waveplayeronplaybackstopped(object sender, StoppedEventArgs stoppedEventArgs)
{
    if (stoppedEventArgs.Exception != null)
    {
        MessageBox.Show(stoppedEventArgs.Exception.Message);
    }
    if (waveplayer != null)
    {
        waveplayer.Stop();
    }
    btn_luyin.Enabled = true;
}

// Play back the locally recorded clip (播放 button handler).
private void btn_play_click(object sender, EventArgs e)
{
    // Block re-recording while playback runs; re-enabled in the stop handler.
    btn_luyin.Enabled = false;
    play_sound(soundfile);
}
|
在上面演示了接收和发送一段语音消息的界面
技术总结
主要用到的技术就是 UDP 传输以及 NAudio 的录音和播放功能。
希望这篇文章能够给大家提供一个思路,帮助大家实现 P2P 语音聊天工具。