src/org/sipdroid/media/RtpStreamReceiver.java
changeset 823 2036ebfaccda
child 826 8649e502be0e
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/org/sipdroid/media/RtpStreamReceiver.java	Fri Nov 20 19:29:42 2009 +0100
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2009 The Sipdroid Open Source Project
+ * Copyright (C) 2005 Luca Veltri - University of Parma - Italy
+ * 
+ * This file is part of Sipdroid (http://www.sipdroid.org)
+ * 
+ * Sipdroid is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ * 
+ * This source code is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this source code; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+package org.sipdroid.media;
+
+import jlibrtp.AppCallerThread;
+import jlibrtp.DataFrame;
+import jlibrtp.Participant;
+import jlibrtp.RTPAppIntf;
+import jlibrtp.RTPReceiverThread;
+import jlibrtp.RTPSession;
+
+import org.sipdroid.media.codecs.Codec;
+import org.sipdroid.net.tools.DataFramePool;
+import org.sipdroid.net.tools.DatagramPool;
+
+import android.content.ContentResolver;
+import android.content.Context;
+import android.content.SharedPreferences.Editor;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioTrack;
+import android.media.ToneGenerator;
+import android.preference.PreferenceManager;
+import android.provider.Settings;
+
+/**
+ * RtpStreamReceiver is a generic RTP stream receiver. It pulls packets from an
+ * RTP session, decodes their payload with the configured codec and plays the
+ * resulting PCM samples through an Android AudioTrack.
+ */
+public class RtpStreamReceiver extends Thread implements RTPAppIntf {
+
+	/** Whether working in debug mode. */
+	public static boolean DEBUG = true;
+
+	/** Size of the read buffer */
+	public static final int BUFFER_SIZE = 1024;
+
+	/** Maximum blocking time spent waiting for new bytes [milliseconds] */
+	public static final int SO_TIMEOUT = 200;
+
+	/** The RTP session */
+	//RtpSocket rtp_socket = null;
+	RTPSession rtpSession = null;
+	byte[] buffer;
+
+	/** The codec */
+	private Codec codec;
+	private Context mContext;
+
+	private int frame_size;
+	private int codec_frame_size;
+	private int sampling_rate;
+
+	/** Whether it is running */
+	boolean running;
+	AudioManager am;
+	ContentResolver cr;
+
+	private int codec_divider;
+	public static int speakermode;
+
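+	// Playout bookkeeping: "user" counts samples written to the AudioTrack, "server"
+	// samples already played; lin holds the current decoded frame, lin2 the previous one.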
+	short lin[];
+	short lin2[];
+	int user, server, lserver, luser, cnt, todo, headroom, len, timeout = 1, seq = 0, cnt2 = 0, m = 1,
+	expseq, getseq, vm = 1, gap, oldvol;
+	boolean islate;
+
+	Codec.Context codecCtx;
+
+	AudioTrack track;
+
+	/**
+	 * Constructs a RtpStreamReceiver.
+	 * 
+	 * @param ci
+	 *            the codec used to decode the incoming payload
+	 * @param rtpSession
+	 *            the RTP session to read packets from
+	 * @param ctx
+	 *            the context used to access audio settings and preferences
+	 */
+	public RtpStreamReceiver(Codec ci, RTPSession rtpSession, Context ctx) {
+		init(ci, rtpSession, ctx);
+	}
+
+	/** Inits the RtpStreamReceiver.
+	 * @param ci the codec
+	 * @param rtpSession the RTP session
+	 * @param ctx the Android context
+	 **/
+	private void init(Codec ci, RTPSession rtpSession, Context ctx) {
+		this.rtpSession = rtpSession;
+		codec = ci;
+		codec_frame_size = codec.getInfo().codecFrameSize;		
+		codec_divider = codec.getInfo().rtpSampleDivider;
+		frame_size = 160 * codec_divider;
+		sampling_rate = codec.getInfo().samplingRate;
+		mContext = ctx;
+	}
+
+	/** Whether the receiver is running */
+	public boolean isRunning() {
+		return running;
+	}
+
+	/** Stops running */
+	public void halt() {
+		running = false;
+	}
+
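+	/** Switches the speaker mode: saves the volume of the old mode, restores the
+	 *  stored volume of the new one and returns the previous mode. */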
+	public int speaker(int mode) {
+		int old = speakermode;
+
+		saveVolume();
+		speakermode = mode;
+		restoreVolume();
+		return old;
+	}
+
+	double smin = 200,s;
+
+	private int REAL_BUFFER_SIZE;
+	public static int nearend;
+
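+	/** Software gain for speakerphone mode: follows the signal envelope to flag
+	 *  near-end speech via nearend, then amplifies the frame by 5x, clipping
+	 *  at the 16-bit range. */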
+	void calc(short[] lin,int off,int len) {
+		int i,j;
+		double sm = 30000,r;
+
+		for (i = 0; i < len; i += 5) {
+			j = lin[i+off];
+			s = 0.03*Math.abs(j) + 0.97*s;
+			if (s < sm) sm = s;
+			if (s > smin) nearend = 3000/5;
+			else if (nearend > 0) nearend--;
+		}
+		for (i = 0; i < len; i++) {
+			j = lin[i+off];
+			if (j > 6550)
+				lin[i+off] = 6550*5;
+			else if (j < -6550)
+				lin[i+off] = -6550*5;
+			else
+				lin[i+off] = (short)(j*5);
+		}
+		r = (double)len/100000;
+		smin = sm*r + smin*(1-r);
+	}
+
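+	/** Restores the STREAM_MUSIC volume saved for the current speaker mode,
+	 *  defaulting to full volume in MODE_NORMAL and 3/4 of the maximum otherwise. */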
+	void restoreVolume() {
+		am.setStreamVolume(AudioManager.STREAM_MUSIC,
+				PreferenceManager.getDefaultSharedPreferences(mContext).getInt("volume"+speakermode, 
+						am.getStreamMaxVolume(AudioManager.STREAM_MUSIC)*
+						(speakermode == AudioManager.MODE_NORMAL?4:3)/4
+				),0);
+	}
+
+	void saveVolume() {
+		Editor edit = PreferenceManager.getDefaultSharedPreferences(mContext).edit();
+		edit.putInt("volume"+speakermode,am.getStreamVolume(AudioManager.STREAM_MUSIC));
+		edit.commit();
+	}
+
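+	/** Saves the current vibrate settings, WiFi sleep policy and ring volume once
+	 *  (guarded by the "oldvalid" preference) so they can be restored after the call. */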
+	void saveSettings() {
+		if (!PreferenceManager.getDefaultSharedPreferences(mContext).getBoolean("oldvalid",false)) {
+			int oldvibrate = am.getVibrateSetting(AudioManager.VIBRATE_TYPE_RINGER);
+			int oldvibrate2 = am.getVibrateSetting(AudioManager.VIBRATE_TYPE_NOTIFICATION);
+			if (!PreferenceManager.getDefaultSharedPreferences(mContext).contains("oldvibrate2"))
+				oldvibrate2 = AudioManager.VIBRATE_SETTING_ON;
+			int oldpolicy = android.provider.Settings.System.getInt(cr, android.provider.Settings.System.WIFI_SLEEP_POLICY, 
+					Settings.System.WIFI_SLEEP_POLICY_DEFAULT);
+			Editor edit = PreferenceManager.getDefaultSharedPreferences(mContext).edit();
+			edit.putInt("oldvibrate", oldvibrate);
+			edit.putInt("oldvibrate2", oldvibrate2);
+			edit.putInt("oldpolicy", oldpolicy);
+			edit.putInt("oldring",am.getStreamVolume(AudioManager.STREAM_RING));
+			edit.putBoolean("oldvalid", true);
+			edit.commit();
+		}
+	}
+
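+	/** Restores the vibrate settings, WiFi sleep policy and ring volume saved by
+	 *  saveSettings() and clears the "oldvalid" flag. */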
+	void restoreSettings() {
+		int oldvibrate = PreferenceManager.getDefaultSharedPreferences(mContext).getInt("oldvibrate",0);
+		int oldvibrate2 = PreferenceManager.getDefaultSharedPreferences(mContext).getInt("oldvibrate2",0);
+		int oldpolicy = PreferenceManager.getDefaultSharedPreferences(mContext).getInt("oldpolicy",0);
+		am.setVibrateSetting(AudioManager.VIBRATE_TYPE_RINGER,oldvibrate);
+		am.setVibrateSetting(AudioManager.VIBRATE_TYPE_NOTIFICATION,oldvibrate2);
+		Settings.System.putInt(cr, Settings.System.WIFI_SLEEP_POLICY, oldpolicy);
+		am.setStreamVolume(AudioManager.STREAM_RING, PreferenceManager.getDefaultSharedPreferences(mContext).getInt("oldring",0), 0);
+		Editor edit = PreferenceManager.getDefaultSharedPreferences(mContext).edit();
+		edit.putBoolean("oldvalid", false);
+		edit.commit();
+	}
+
+	public static float good, late, lost, loss;
+
+	/** Receiver loop: reads RTP frames, decodes them and streams the PCM to the AudioTrack, adapting the playout headroom as it goes. */
+	@Override
+	public void run() {
+		REAL_BUFFER_SIZE = BUFFER_SIZE * codec_divider;
+		speakermode = AudioManager.MODE_IN_CALL;
+		android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_AUDIO);
+		am = (AudioManager) mContext.getSystemService(Context.AUDIO_SERVICE);
+		cr = mContext.getContentResolver();
+		saveSettings();
+
+		Settings.System.putInt(cr, Settings.System.WIFI_SLEEP_POLICY,Settings.System.WIFI_SLEEP_POLICY_NEVER);
+		am.setVibrateSetting(AudioManager.VIBRATE_TYPE_RINGER,AudioManager.VIBRATE_SETTING_OFF);
+		am.setVibrateSetting(AudioManager.VIBRATE_TYPE_NOTIFICATION,AudioManager.VIBRATE_SETTING_OFF);
+		oldvol = am.getStreamVolume(AudioManager.STREAM_MUSIC);
+		restoreVolume();
+
+		track = new AudioTrack(AudioManager.STREAM_MUSIC, sampling_rate, AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
+				REAL_BUFFER_SIZE*2*2, AudioTrack.MODE_STREAM);
+		track.play();
+		lin = new short[REAL_BUFFER_SIZE];
+		lin2 = new short[REAL_BUFFER_SIZE];
+		user = 0; //number of samples written
+		server = 0; // number of samples played
+		lserver = 0; // last number of samples played
+		luser = -sampling_rate; // last number of samples written
+		cnt = 0;
+		codecCtx = codec.initDecoder(); 
+		System.gc();
+		println("DEBUG: rtpStreamReceiver session launch");
+		running = true;
+		AppCallerThread appCall = rtpSession.getAppCallerThrd();
+		RTPReceiverThread recv = rtpSession.getRTPRecvThrd();
+		DataFrame frame = null;
+		recv.init();
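+		// Main receive loop: read the next RTP packet, decode its payload and write
+		// the PCM samples to the AudioTrack, adapting the playout headroom.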
+		while (running) {
+			recv.readPacketToBuffer();
+			frame = appCall.getNextDataFrame();
+			if (frame == null)
+				continue;
+			buffer = (frame.getPkt()[0]).getPayload();
+			if (timeout != 0) { // write silence to the AudioTrack to prime it
+				user += track.write(lin,0,REAL_BUFFER_SIZE);
+				user += track.write(lin,0,REAL_BUFFER_SIZE);
+			}
+			timeout = 0;
+			if (running) {
+
+				//println("seq " + seq + " frame seq " + (frame.getPkt()[0]).getSeqNumber());
+				if (seq == (frame.getPkt()[0]).getSeqNumber()) {
+					m++;
+					continue;
+				}
+
+				codec.decode(codecCtx, buffer, 12, codec_frame_size, lin, 0);
+				len = frame_size;
+
+				if (speakermode == AudioManager.MODE_NORMAL)
+					calc(lin,0,len);
+
+				server = track.getPlaybackHeadPosition(); // current position of the playback head
+				headroom = user-server; // difference between what was written to the track and what has been played
+				//println("headroom " + headroom + " user " + user + " server " + server);
+				if (headroom < 250 * codec_divider) { // headroom too small: catch up by writing silence or repeating pending samples
+					todo = 625 * codec_divider - headroom;
+					//println("insert "+todo);
+					android.util.Log.d("RECV", "insert");
+					islate = true;
+					if (todo < len)
+						user += track.write(lin,0,todo); // pad with part of the freshly decoded frame
+					else
+						user += track.write(lin2,0,todo); // write padding from the previous frame buffer before writing the packet
+				} else
+					islate = false;
+
+				if (headroom > 1000 * codec_divider) // headroom too large: measure how long it stays that way
+					cnt += len; // accumulate the number of samples during which headroom exceeded 1000
+				else
+					cnt = 0;
+
+				if (lserver == server) // count consecutive iterations during which no sample was played
+					cnt2++;
+				else
+					cnt2 = 0;
+
+				if (cnt > 1000 * codec_divider && cnt2 < 2) { // headroom has exceeded 1000 for more than 1000 samples while playback keeps advancing: sleep to let the track drain
+					todo = headroom - 625 * codec_divider;
+					try {
+						//android.util.Log.d("RECV", "cut");
+						sleep(20);
+					} catch (InterruptedException e) {
+						// interrupted while letting the playback head catch up
+						e.printStackTrace();
+					}
+				}
+				user += track.write(lin,0,len);
+				m = 1;
+				seq = (frame.getPkt()[0]).getSeqNumber();
+				DataFramePool.getInstance().returnFrame(frame);
+				//println("headroom " + headroom + " user " + user + " server " + server + " luser " + luser + " lserver " + lserver);
+				if (user >= luser + sampling_rate) {
+					if (am.getMode() != speakermode) {
+						am.setMode(speakermode);
+						switch (speakermode) {
+						case AudioManager.MODE_IN_CALL:
+							/*am.setStreamVolume(AudioManager.STREAM_RING,(int)(
+									am.getStreamMaxVolume(AudioManager.STREAM_RING)*
+									com.mbdsys.sfrdroid.ui.Settings.getEarGain()), 0);
+							track.setStereoVolume(AudioTrack.getMaxVolume()*
+									com.mbdsys.sfrdroid.ui.Settings.getEarGain()
+									,AudioTrack.getMaxVolume()*
+									com.mbdsys.sfrdroid.ui.Settings.getEarGain());*/
+							//running = false;
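+							// fall through: MODE_NORMAL below resets the track to full volume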
+						case AudioManager.MODE_NORMAL:
+							track.setStereoVolume(AudioTrack.getMaxVolume(),AudioTrack.getMaxVolume());
+							//running = false;
+						}
+					}
+					luser = user;
+				}
+				lserver = server;
+				System.arraycopy(lin, 0, lin2, 0, REAL_BUFFER_SIZE);
+			}
+		}
+		println("POOL SIZE " + DatagramPool.getInstance().getPoolSize());
+		track.stop();
+		//if (Receiver.pstn_state == null || Receiver.pstn_state.equals("IDLE"))
+		//	am.setMode(AudioManager.MODE_NORMAL);
+		saveVolume();
+		am.setStreamVolume(AudioManager.STREAM_MUSIC,oldvol,0);
+		restoreSettings();
+		ToneGenerator tg = new ToneGenerator(AudioManager.STREAM_RING,ToneGenerator.MAX_VOLUME/4*3);
+		tg.startTone(ToneGenerator.TONE_PROP_PROMPT);
+		try {
+			Thread.sleep(500);
+		} catch (InterruptedException e) {
+		}
+		tg.stopTone();
+		rtpSession = null;
+		track = null;
+		codec.cleanDecoder(codecCtx);
+		codec = null;
+		println("rtp receiver terminated");
+	}
+
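+	/** Tears the receiver down (same cleanup as the end of run()): stops the track,
+	 *  restores audio settings and volumes, plays a prompt tone and releases the
+	 *  codec context and session. */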
+	public void endReceiver() {
+		track.stop();
+		//if (Receiver.pstn_state == null || Receiver.pstn_state.equals("IDLE"))
+		//	am.setMode(AudioManager.MODE_NORMAL);
+		saveVolume();
+		am.setStreamVolume(AudioManager.STREAM_MUSIC,oldvol,0);
+		restoreSettings();
+		ToneGenerator tg = new ToneGenerator(AudioManager.STREAM_RING,ToneGenerator.MAX_VOLUME/4*3);
+		tg.startTone(ToneGenerator.TONE_PROP_PROMPT);
+		try {
+			Thread.sleep(500);
+		} catch (InterruptedException e) {
+		}
+		tg.stopTone();
+		rtpSession = null;
+		track = null;
+		codec.cleanDecoder(codecCtx);
+		codec = null;
+		println("rtp receiver terminated");
+	}
+
+	/** Debug output */
+	static int i = 0;
+	private static void println(String str) {
+		System.out.println("RtpStreamReceiver "+ i++ +": " + str);
+	}
+
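+	/** Returns the unsigned value (0..255) of a signed byte. */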
+	public static int byte2int(byte b) { // return (b>=0)? b : -((b^0xFF)+1);
+		// return (b>=0)? b : b+0x100;
+		return (b + 0x100) % 0x100;
+	}
+
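+	/** Combines two bytes, big-endian, into an unsigned 16-bit value. */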
+	public static int byte2int(byte b1, byte b2) {
+		return (((b1 + 0x100) % 0x100) << 8) + (b2 + 0x100) % 0x100;
+	}
+
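+	// RTPAppIntf callbacks: left as empty stubs because run() pulls frames from the
+	// session directly instead of being driven by callbacks.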
+	@Override
+	public int frameSize(int payloadType) {
+		// TODO Auto-generated method stub
+		return 0;
+	}
+
+	@Override
+	public void receiveData(DataFrame frame, Participant participant) {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public void userEvent(int type, Participant[] participant) {
+		// TODO Auto-generated method stub
+
+	}
+
+}