This article collects typical usage examples of the Java method org.appcelerator.titanium.util.Log.d. If you are wondering what the Log.d method does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also read further about the enclosing class org.appcelerator.titanium.util.Log.
The following lists 15 code examples of the Log.d method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
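Before the examples, here is a minimal, self-contained sketch of the basic call pattern. It only uses signatures that appear in the examples below (Log.d(tag, message) and Log.w(tag, message, throwable)); the class name LogSample, the TAG constant and the DBG flag are illustrative placeholders mirroring the LCAT/DBG convention, not part of the Titanium SDK:
import org.appcelerator.titanium.util.Log; // same Titanium log utility used in every example below

public class LogSample {
    // Hypothetical tag and debug flag, following the LCAT/DBG convention seen in the examples.
    private static final String TAG = "LogSample";
    private static final boolean DBG = true;

    public void doWork() {
        Log.d(TAG, "doWork started"); // debug-level message: tag + message
        try {
            // ... work that may throw ...
        } catch (Throwable t) {
            Log.w(TAG, "Issue while working: ", t); // warning with the throwable attached
        }
        if (DBG) {
            Log.d(TAG, "doWork finished"); // gate verbose logging behind a debug flag
        }
    }
}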
Example 1: pause
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
public void pause()
{
    try {
        if (mp != null) {
            am.setMode(AudioManager.USE_DEFAULT_STREAM_TYPE);
            am.setSpeakerphoneOn(true);
            if (mp.isPlaying()) {
                if (DBG) {
                    Log.d(LCAT, "audio is playing, pause");
                }
                //if (remote) {
                stopProgressTimer();
                //}
                mp.pause();
                paused = true;
                setState(STATE_PAUSED);
            }
        }
    } catch (Throwable t) {
        Log.w(LCAT, "Issue while pausing : ", t);
    }
}
Example 2: startRecognize
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
protected void startRecognize() {
    Log.d(LCAT, "startRecognize");
    Intent intent = null;
    switch (this.getAction()) {
        case SpeechrecognizerModule.RECOGNIZE:
            intent = getRecognizeSpeechIntent();
            break;
        case SpeechrecognizerModule.WEBSERACH:
            intent = getWebSearchIntent();
            break;
        case SpeechrecognizerModule.HANDSFREE:
            intent = getVoiceSearchHandsFree();
            break;
    }
    if (intent != null) {
        mSpeechRecognizer.startListening(intent);
    }
}
Example 3: getRecognizeSpeechIntent
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
private Intent getRecognizeSpeechIntent() {
    Log.d(LCAT, "getRecognizeSpeechIntent");
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    if (getLangmodel() == SpeechrecognizerModule.FREEFORM) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    } else if (this.getLangmodel() == SpeechrecognizerModule.WEBSEARCH) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH);
    } else {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    }
    /*
     * if (getPrompt() != null) {
     *     intent.putExtra(RecognizerIntent.EXTRA_PROMPT, getPrompt()); }
     */
    if (getLangtag() != null) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, getLangtag());
    }
    intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, getMaxresult());
    intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, TiApplication
            .getInstance().getPackageName());
    return intent;
}
Example 4: release
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
public void release()
{
    try {
        if (mp != null) {
            mp.setOnCompletionListener(null);
            mp.setOnErrorListener(null);
            mp.setOnBufferingUpdateListener(null);
            mp.setOnInfoListener(null);
            mp.setOnPreparedListener(null);
            /*
             * Restore default stream type
             */
            if (android.os.Build.VERSION.SDK_INT > android.os.Build.VERSION_CODES.KITKAT) {
                am.setMode(AudioManager.USE_DEFAULT_STREAM_TYPE);
                am.setSpeakerphoneOn(true);
            }
            mp.release();
            mp = null;
            if (DBG) {
                Log.d(LCAT, "Native resources released.");
            }
            remote = false;
        }
    } catch (Throwable t) {
        Log.w(LCAT, "Issue while releasing : ", t);
    }
}
Example 5: onInfo
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
@Override
public boolean onInfo(MediaPlayer mp, int what, int extra)
{
    String msg = "OnInfo Unknown media issue.";
    switch (what) {
        case MediaPlayer.MEDIA_INFO_BAD_INTERLEAVING:
            msg = "Stream not interleaved or interleaved improperly.";
            break;
        case MediaPlayer.MEDIA_INFO_NOT_SEEKABLE:
            msg = "Stream does not support seeking";
            break;
        case MediaPlayer.MEDIA_INFO_UNKNOWN:
            msg = "Unknown media issue";
            break;
        case MediaPlayer.MEDIA_INFO_VIDEO_TRACK_LAGGING:
            msg = "Video is too complex for decoder, video lagging."; // shouldn't occur, but covering bases.
            break;
        case MediaPlayer.MEDIA_INFO_METADATA_UPDATE:
            msg = "Video metadata update.";
            break;
    }
    Log.d(LCAT, "Error " + msg);
    KrollDict data = new KrollDict();
    data.put(TiC.PROPERTY_CODE, 0);
    data.put(TiC.PROPERTY_MESSAGE, msg);
    proxy.fireEvent(EVENT_ERROR, data);
    return true;
}
Example 6: onBufferingUpdate
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
@Override
public void onBufferingUpdate(MediaPlayer mp, int percent)
{
    if (DBG) {
        Log.d(LCAT, "Buffering: " + percent + "%");
    }
    KrollDict data = new KrollDict();
    data.put("percent", percent);
    proxy.fireEvent(EVENT_BUFFERING, data);
}
Example 7: onPrepared
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
@Override
public void onPrepared(MediaPlayer arg0) {
    Log.d(LCAT, "In onPrepared");
    // TODO Auto-generated method stub
    //this.setState(STATE_INITIALIZED);
    //startPlay();
}
Example 8: handleCreationDict
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
@Override
public void handleCreationDict(KrollDict options)
{
    super.handleCreationDict(options);
    if (options.containsKey("message")) {
        Log.d(LCAT, "example created with message: " + options.get("message"));
    }
}
Example 9: handleEvent
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
@Override
public void handleEvent(DownloadEvent event) {
    // TODO Auto-generated method stub
    Log.d(LCAT, "Download Paused ");
    KrollDict dict = createDict(event.getDownloadInformation());
    self.fireEvent(EVENT_PAUSED, dict);
    //TiMessenger.sendBlockingMainMessage(handler.obtainMessage(MSG_FIRE_PAUSED, dict));
}
Example 10: handleCreationDict
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
@Override
public void handleCreationDict(KrollDict options)
{
    super.handleCreationDict(options);
    if (options.containsKey("message")) {
        Log.d(TAG, "example created with message: " + options.get("message"));
    }
}
Example 11: createSpeechRecognizer
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
protected void createSpeechRecognizer() {
    if (!TiApplication.isUIThread()) {
        Log.d(LCAT, "handleCreationDict not on UI Thread");
        TiMessenger.sendBlockingMainMessage(handler
                .obtainMessage(MSG_CREATESPEECHRECOGNIZER));
    } else {
        Log.d(LCAT, "handleCreationDict on UI Thread");
        createSpeechRecognizerSynch();
    }
}
Example 12: getWebSearchIntent
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
private Intent getWebSearchIntent() {
    Log.d(LCAT, "getWebSearchIntent");
    Intent intent = new Intent(RecognizerIntent.ACTION_WEB_SEARCH);
    if (getLangmodel() == SpeechrecognizerModule.FREEFORM) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    } else if (this.getLangmodel() == SpeechrecognizerModule.WEBSEARCH) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH);
    } else {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    }
    /*
     * if (getPrompt() != null) {
     *     intent.putExtra(RecognizerIntent.EXTRA_PROMPT, getPrompt()); }
     */
    if (getLangtag() != null) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, getLangtag());
    }
    intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, getMaxresult());
    intent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS,
            getIsPartialresult());
    if (Build.VERSION.SDK_INT >= 11) { // API level 11: EXTRA_WEB_SEARCH_ONLY
        intent.putExtra(RecognizerIntent.EXTRA_WEB_SEARCH_ONLY,
                getIsWebsearchonly());
    }
    if (Build.VERSION.SDK_INT >= 14) { // API level 14: EXTRA_ORIGIN
        if (getOrigin() != null) {
            intent.putExtra(RecognizerIntent.EXTRA_ORIGIN, getOrigin());
        }
    }
    intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, TiApplication
            .getInstance().getPackageName());
    return intent;
}
Example 13: getVoiceSearchHandsFree
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
private Intent getVoiceSearchHandsFree() {
    Log.d(LCAT, "getVoiceSearchHandsFree");
    if (Build.VERSION.SDK_INT >= 16) {
        Intent intent = new Intent(
                RecognizerIntent.ACTION_VOICE_SEARCH_HANDS_FREE);
        intent.putExtra(RecognizerIntent.EXTRA_SECURE, getIsSecure());
        intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                TiApplication.getInstance().getPackageName());
        return intent;
    }
    return null;
}
Example 14: release
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
@Kroll.method
public void release() {
    Log.d(LCAT, "release");
    if (mSpeechRecognizer != null) {
        mSpeechRecognizer.destroy();
        mSpeechRecognizer = null;
    }
}
Example 15: initialize
import org.appcelerator.titanium.util.Log; // import the package/class the method depends on
protected void initialize() throws IOException
{
    try {
        setState(STATE_STARTING);
        mp = new MediaPlayer();
        String url = TiConvert.toString(proxy.getProperty(TiC.PROPERTY_URL));
        if (URLUtil.isAssetUrl(url)) {
            String path = url.substring(TiConvert.ASSET_URL.length());
            AssetFileDescriptor afd = null;
            try {
                afd = context.getAssets().openFd(path);
                // Why mp.setDataSource(afd) doesn't work is a problem for another day.
                // http://groups.google.com/group/android-developers/browse_thread/thread/225c4c150be92416
                mp.setDataSource(afd.getFileDescriptor(), afd.getStartOffset(), afd.getLength());
            } catch (IOException e) {
                Log.e(LCAT, "Error setting file descriptor: ", e);
            } finally {
                if (afd != null) {
                    afd.close();
                }
            }
        } else {
            Uri uri = Uri.parse(url);
            if (uri.getScheme().equals(TiC.PROPERTY_FILE)) {
                mp.setDataSource(uri.getPath());
            } else {
                Log.d(LCAT, "audio is a remote url." + url);
                // IceCastScraper ic = new IceCastScraper();
                // List<Stream> streams = ic.scrape(URI.create(url));
                // String song = streams.get(0).getCurrentSong();
                // String songUrl = streams.get(0).getUri().toString();
                // Log.d(LCAT, "audio song. " + song);
                // Log.d(LCAT, "audio URL. " + songUrl);
                //
                // ShoutCastScraper sc = new ShoutCastScraper();
                // List<Stream> streams = sc.scrape(URI.create(url));
                //
                // Log.d(LCAT, "streams count " + streams.size());
                //
                // String song = streams.get(0).getCurrentSong();
                // String songUrl = streams.get(0).getUri().toString();
                // Log.d(LCAT, "audio song. " + song);
                // Log.d(LCAT, "audio URL. " + songUrl);
                remote = true;
                mp.setDataSource(url);
            }
        }
        setSpeakerphoneOn(); // Configures initial audio routing
        mp.setLooping(looping);
        mp.setOnCompletionListener(this);
        mp.setOnErrorListener(this);
        mp.setOnInfoListener(this);
        mp.setOnBufferingUpdateListener(this);
        mp.setOnPreparedListener(this);
        mp.prepare(); // Probably need to allow for Async
        setState(STATE_INITIALIZED);
        setVolume(volume);
        if (proxy.hasProperty(TiC.PROPERTY_TIME)) {
            setTime(TiConvert.toInt(proxy.getProperty(TiC.PROPERTY_TIME)));
        }
    } catch (Throwable t) {
        Log.w(LCAT, "Issue while initializing : ", t);
        release();
        setState(STATE_STOPPED);
    }
}