This article collects typical usage examples of the viz.link function in Python. If you are wondering what viz.link does, how to call it, or want to see it used in real code, the curated examples below may help.
The following presents 15 code examples of the link function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
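Before the examples, here is a minimal sketch of the core pattern (illustrative only; the node names, positions, and offsets below are placeholders, not taken from any of the examples). viz.link(source, destination) makes the destination follow the source's transform every frame and returns a link object whose offset and mask can be adjusted:

import viz
import vizshape

viz.go()

# Placeholder source node standing in for a real tracker or HMD sensor
tracker = viz.addGroup()
tracker.setPosition([0, 1.8, 0])

# Drive the main viewpoint from the tracker every frame
viewLink = viz.link(tracker, viz.MainView)

# Make a sphere follow the viewpoint, offset 2 m in front of it
marker = vizshape.addSphere(radius=0.1)
markerLink = viz.link(viz.MainView, marker)
markerLink.preTrans([0, 0, 2])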
Example 1: main
def main():
    global masterTimerG, recordingTimer
    setUp()
    masterTimerG = vizact.ontimer(0, masterTimer)
    #recordingTimer = vizact.ontimer(0, recordCurrentSubjectStatus)
    if not KEYBOARD_NAVIGATE:
        addKeyPresses()
    else:
        # Setup keyboard/mouse tracker
        tracker = vizcam.addWalkNavigate(moveScale=10.0)
        tracker.setPosition([0, 1.8, 0])
        viz.link(tracker, viz.MainView)
        viz.mouse.setVisible(False)
    def printLocation():
        print viz.MainView.getPosition(), viz.MainView.getEuler()
    vizact.onkeydown('p', printLocation)
    #Add Wiimote support
    # wii = viz.add('wiimote.dle')
    # #Connect to first available wiimote
    # wiimote = wii.addWiimote()
    # wiimote.led = wii.LED_4
    # #Add button functions
    # vizact.onsensordown(wiimote,wii.BUTTON_A,TurnFlyingOn)
    # vizact.onsensordown(wiimote,wii.BUTTON_B,TurnFlyingOff)
    # vizact.onsensordown(wiimote,wii.BUTTON_1,endChildHappy)
    #
    #Hacky bug fix below, I apologize -Mark
    if not FLYING_F and YOKE_F:
        viztask.schedule(fixStartHeliPos)
Example 2: _phone_ring
def _phone_ring(phone_node):
    global phone_light
    global enabled
    print 'phone will be ringing now'
    phone_node.playsound(RINGTONE_FILE)
    # Toggle the existing phone light, or create and attach it on the first call
    if phone_light:
        if not enabled:
            phone_light.enable()
            enabled = True
        else:
            phone_light.disable()
            enabled = False
    else:
        phone_light = vizfx.addSpotLight(scene=SCENE)
        pos = phone_node.getPosition()
        euler = phone_node.getEuler()
        phone_light.position(pos[0], pos[1], pos[2])
        phone_light.setEuler(-euler[0], euler[1], euler[2])
        phone_light.direction(1, 1, 0)
        phone_light.spread(10)
        phone_light.intensity(2)
        phone_light.spotexponent(1)
        viz.link(phone_light, phone_node)
        phone_light.enable()
        enabled = True
Example 3: setUpSound
def setUpSound():
    global windAudio, endingAudio, aboveLocation, endingAudioBasic, trumpetAudio
    if USE_HMD_F:
        #Adjusted head location to spatialize towards center of room
        #Next, create 4 subviews to attach sounds to around the room for whooshing
        userView = viz.addView()
        userView.setPosition(0, 1.6, 0)
        headLocation = viz.addGroup()
        viz.link(userView, headLocation)
        #Good adjustment for reverb and room, might not work as well for the city
        #vizsonic.setReverb(30.0, 0.2, 0.5, 0.9, 0.1)
        vizsonic.setSimulatedRoomRadius(30, 30)
        vizsonic.setShaker(1.0)
        viz.setOption('sound3d.useViewRotation', 0)
        #Set auralizer to play towards center of room
        viz.setListenerSound3D(headLocation)
        #Turn on sound debugging?
        viz.setDebugSound3D(False)
        #Turn on windy city sounds
        vizsonic.setAmbient('windy_edit.wav', 0.75, 0)
        #Configure wind playing based on speed, play at higher location
        aboveView = viz.addView()
        aboveView.setPosition(0, 3, 0)
        aboveLocation = viz.addGroup()
        viz.link(aboveView, aboveLocation)
        windAudio = aboveLocation.playsound('windMono.wav', viz.STOP, volume=WIND_MIN_VOLUME)
        trumpetAudio = aboveLocation.playsound('audio/hero_trumpet.wav', viz.STOP, volume=0.75)
    chooseExpStateNLoadSound()
Example 4: _setupRift
def _setupRift(PPT1, OH):
    import oculus
    global hmd
    hmd = oculus.Rift()
    if PPT1:
        _setupHeadTrackers(hmd, OH)
    else:
        viz.link(hmd.getSensor(), MainView, mask=viz.LINK_ORI)
Example 5: addUser
def addUser():
    global mainUser
    # ---- Trackers ----
    # Initialize an empty composite object to store all the trackers
    # The composite.storeTracker() method is used to combine the individual trackers for the user's body within the composite
    composite = VU.VUCompositeTrackers()
    vrpn = viz.add('vrpn7.dle')
    headPos = vrpn.addTracker('[email protected]' + PPT_MACHINE, PPT_HEAD_ID)
    if not OPTICAL_HEADING:
        iLabs = viz.addExtension('inertiallabs.dle')
        headOri = iLabs.addSensorBus(port=INERTIALLABS_HEAD_PORT)[0]
        VU.onkeydownspecial('r', resetHead, headOri, 90)
    # ---- Display ----
    import sensics
    sensics.zSight_60()
    if not OPTICAL_HEADING:
        headTracker = viz.mergeLinkable(headPos, headOri)
    else:
        headTracker = headPos
    composite.storeTracker(composite.HEAD, headTracker)
    viz.setOption('viz.fullscreen', 1)                 # Go fullscreen on monitor 1
    viz.setOption('viz.fov', [37.5, 1.25])             # Set fov to match sensics specs
    viz.setOption('viz.setDisplayMode', [1280, 1024])  # Change resolution of displays
    # ---- Input ----
    wandpos = vrpn.addTracker('[email protected]' + PPT_MACHINE, PPT_WAND_ID)
    wandori = iLabs.addSensorBus(port=INERTIALLABS_HAND_PORT)[0]
    wandtracker = viz.mergeLinkable(wandpos, wandori)
    # wandjoy = VU.VUJoystickPPTWandVRPN(hostname=PPT_MACHINE, markerid=PPT_WAND_ID+1)
    # wandflyer = VU.VUTrackerWandFlyerSmooth(wandjoy, wandtracker, accelerationSteps=Config.WAND_ACCELERATION_STEPS, decelerationSteps=Config.WAND_DECELERATION_STEPS, speed=Config.WAND_SPEED_SCALE, keystrokes=[Config.WAND_BUTTON1, Config.WAND_BUTTON2, Config.WAND_BUTTON3, Config.WAND_BUTTON4, Config.WAND_BUTTON5, Config.WAND_BUTTON6], buttonReset=None, buttonForward=None, buttonFist=None, oriSteer=False)
    # wandflyer.getHandSensor().joystick = wandjoy
    # composite.addDriverNode(wandflyer)
    # composite.storeTracker( composite.RHAND, wandtracker )
    #the following is beta:
    global main_sphere
    composite.storeTracker(composite.RHAND, wandtracker)
    viz.link(wandtracker, main_sphere)
    #end beta.
    #not sure if u need this in beta: composite.createRightHand(wandori)
    VU.onkeydownspecial('r', resetHand, wandori, 90)
    # composite.storeTracker( composite.RHAND, wandtracker )
    #composite.createRightHand(wandori)
    # viz.link(wandtracker, ball)
    # ---- Avatar ----
    composite.createAvatarNone()
    # ---- Finalize Composite ----
    composite.finishTrackers()   # Build up internal links for all the tracking devices
    composite.defineViewpoint()  # Attach viewpoint to default location on the user
    mainUser = composite
    manager.addComposite(mainUser, 'Main-User')
Example 6: _setupVive
def _setupVive(PPT1, OH, **kwargs):
    import steamvr
    global hmd
    hmd = steamvr.HMD(**kwargs)
    if PPT1:
        viveViewLink = viz.link(hmd.getSensor(), viz.MainView)
    else:
        viveViewLink = viz.link(hmd.getSensor(), viz.MainView)
Example 7: addBusLight
def addBusLight(scene=viz.MainScene):
    busLightModel = viz.add("Bus Light.vzf", scene=scene)
    light = viz.addLight(scene=scene, group=1)
    viz.link(busLightModel, light)
    light.position(0, 0, 0, 1)
    busLightModel.emissive(viz.WHITE)
    busLightModel.setEuler([0, 90.0, 0])
    light.color(viz.WHITE)
    intensity = 3
    light.intensity(intensity)
    light.quadraticattenuation(1)
    return busLightModel
Example 8: addAimer
def addAimer(imageFile='crosshair.png', size=.3, scene=viz.MainScene, incomingColor=viz.WHITE):
    # print 'ADD AIMER CALLED'
    global crosshair
    global crosshairLink
    # scene=viz.MainScene
    if vhil_devkit.hmd is not None:
        hud = vhil_devkit.hmd.getHUD()
        crosshairPos = [0.0, 0.0, 1.0]
        crosshairScale = 0.05
        crosshair = viz.addText3D("O", pos=crosshairPos, parent=hud)
        crosshairScale = 0.025
        crosshair.setScale(crosshairScale, crosshairScale, crosshairScale)
        crosshair.drawOrder(1000000)
        if incomingColor is viz.GREEN:
            crosshairScale = 0.05
            crosshair.setScale(crosshairScale, crosshairScale, crosshairScale)
            shift = -0.01
            crosshairPos = [shift, shift, 1.0]
            crosshair.setPosition(crosshairPos)
        crosshair.color(incomingColor)
        crosshair.disable(viz.INTERSECTION)
        crosshair.disable(viz.LIGHTING)
        crosshair.disable(viz.DEPTH_TEST)
        # crosshair = viz.addTexQuad(texture=viz.add(imageFile), size=size, scene=scene, parent=hud)
        crosshairLink = viz.link(viz.MainView, crosshair)
        crosshairLink.disable()
        # crosshairLink.preTrans([0,0,4])
        # crosshairLink.preTrans([0,0,1.0])
        # crosshair.disable(viz.INTERSECTION)
        # crosshair.disable(viz.LIGHTING)
        #
        # # Appear above other nodes
        # crosshair.drawOrder(100)
        # crosshair.disable(viz.DEPTH_TEST)
        print "HERE"
    else:
        crosshair = viz.addTexQuad(texture=viz.add(imageFile), size=size, scene=scene)
        crosshairLink = viz.link(viz.MainView, crosshair)
        crosshairLink.preTrans([0, 0, 4])
        crosshair.disable(viz.INTERSECTION)
        crosshair.disable(viz.LIGHTING)
        # Appear above other nodes
        crosshair.drawOrder(100)
        crosshair.disable(viz.DEPTH_TEST)
    return crosshair, crosshairLink
Example 9: _setupRift
def _setupRift(PPT1, OH, **kwargs):
    import oculus
    global hmd
    hmd = oculus.Rift(**kwargs)
    if PPT1:
        oculusViewLink = _setupHeadTrackers(hmd, OH)
    else:
        if RIFT_CAM:
            oculusViewLink = viz.link(hmd.getSensor(), MainView, mask=viz.LINK_POS|viz.LINK_ORI)
            oculusViewLink.postTrans(HOME_POS)
        else:
            oculusViewLink = viz.link(hmd.getSensor(), MainView, mask=viz.LINK_ORI)
            oculusViewLink.postEuler(HOME_ORI)
Example 10: configureSound
def configureSound():
    global subview
    subview = viz.addView()
    subview.setPosition(0, 1.6, -1)
    headLocation = viz.addGroup()
    viz.link(subview, headLocation)
    vizsonic.setReverb(6.0, 0.2, 0.5, 0.9, 0.1)
    vizsonic.setSimulatedRoomRadius(3, 1.5)
    vizsonic.setShaker(1.0)
    viz.setOption('sound3d.useViewRotation', 0)
    #Set auralizer to play towards center of room
    viz.setListenerSound3D(subview)
    #Turn on sound debugging?
    viz.setDebugSound3D(False)
Example 11: addPoliceLights
def addPoliceLights(scene=viz.MainScene):
    policeLight = vizfx.addSpotLight(scene=scene)
    policeLight.setEuler(-105, 0, 0)
    policeLight.color(viz.WHITE)
    policeLight.emissive(viz.WHITE)
    policeLight.spotexponent(1)
    policeLight.quadraticattenuation(1)
    policeLight.intensity(10)
    orb = vizshape.addSphere(radius=0.4, flipFaces=True, scene=scene)
    orb.color(viz.WHITE)
    orb.emissive(viz.WHITE)
    orb.alpha(1)
    viz.link(policeLight, orb)
    return policeLight
Example 12: setUpNav
def setUpNav():
    global RIFT
    global useKeyboard
    global viewLink
    global home
    global navigationNode
    if RIFT:
        #add oculus settings
        hmd = oculus.Rift()
        viz.link(hmd.getSensor(), viz.MainView)
        viewLink = viz.link(navigationNode, viz.MainView)
        viewLink.setOffset(home)
        viewLink.preMultLinkable(hmd.getSensor())
    else:
        vizcam.addWalkNavigate()
Example 13: addAppendages
def addAppendages(avatar, numberOfArms):
    global chestHorn, leftShoulderHorn, rightShoulderHorn, headHorn
    #adding virtual models of arms
    if some_global_variables.isLeftHanded == 0:
        #right-handed
        some_global_variables.appendage3 = addArm(PATH_TO_STUFF + "skin_tones/thirdArm.png", resources.RIGHT_ARM)
        some_global_variables.appendage4 = addArm(PATH_TO_STUFF + "skin_tones/fourthArm.png", resources.LEFT_ARM)
        some_global_variables.appendage5 = addArm(PATH_TO_STUFF + "skin_tones/fifthArm.png", resources.RIGHT_ARM)
    else:
        #left-handed
        some_global_variables.appendage3 = addArm(PATH_TO_STUFF + "skin_tones/thirdArm.png", resources.LEFT_ARM)
        some_global_variables.appendage4 = addArm(PATH_TO_STUFF + "skin_tones/fourthArm.png", resources.RIGHT_ARM)
        some_global_variables.appendage5 = addArm(PATH_TO_STUFF + "skin_tones/fifthArm.png", resources.LEFT_ARM)
    TouchCube.thirdAppendage = some_global_variables.appendage3
    #adding horns for attaching the arms to
    chestHorn = viz.addGroup()
    chestHornLink = viz.link(avatar.getBone('Bip01 Spine2'), chestHorn)
    chestHornLink.preTrans([0.0, 0.0, 0.0])
    chestHornLink.preEuler([-90.0, 0.0, 0.0])
    leftShoulderHorn = viz.addGroup()
    leftShoulderHornLink = viz.link(avatar.getBone('Bip01 L Clavicle'), leftShoulderHorn)
    leftShoulderHornLink.preTrans([-0.2, 0.0, 0.1])
    leftShoulderHornLink.preEuler([-90.0, 0.0, 0.0])
    rightShoulderHorn = viz.addGroup()
    rightShoulderHornLink = viz.link(avatar.getBone('Bip01 R Clavicle'), rightShoulderHorn)
    rightShoulderHornLink.preTrans([0.2, 0.0, 0.1])
    rightShoulderHornLink.preEuler([-90.0, 0.0, 0.0])
    headHorn = viz.addGroup()
    headHornLink = viz.link(avatar.getBone('Bip01 Head'), headHorn)
    headHornLink.preTrans([0.0, 0.2, 0.1])
    headHornLink.preEuler([-90.0, 0.0, 0.0])
    #setting the horns for fourth and fifth arms
    if some_global_variables.isLeftHanded == 0:
        #right-handed
        some_global_variables.appendage4.setParent(leftShoulderHorn)
    else:
        #left-handed
        some_global_variables.appendage4.setParent(rightShoulderHorn)
    some_global_variables.appendage5.setParent(headHorn)
    #switching horn attachment to the appendages based on the number of arms to be used
    switchControlSchema(numberOfArms)
Example 14: grabActionOnThirdAppendage
def grabActionOnThirdAppendage(grabTrigger):
    global thirdAppendage, thirdAppendageReadyForGrab, thirdAppendageGrabbed, ghostAvatar, matrixOfThirdAppendage, grabLink, posOfThirdAppendage, tahLink
    if (thirdAppendageReadyForGrab is True) and (grabTrigger is True):
        #Grabbing the third appendage now
        ExitProximity(True)
        thirdAppendageGrabbed = True
        thirdAppendageReadyForGrab = False
        matrixOfThirdAppendage = thirdAppendage.getMatrix(mode=viz.ABS_GLOBAL)
        posOfThirdAppendage = thirdAppendage.getPosition()
        tahLink.disable()
        #grabLink = viz.grab(pptextensionDK2.rhPPT, thirdAppendage)
        grabLink = viz.link(pptextensionDK2.rhPPT, thirdAppendage)
        grabLink.setMask(viz.LINK_POS_OP)
        grabLink.reset(viz.RESET_POS)
        grabLink.setSrcFlag(viz.ABS_LOCAL)
        grabLink.setDstFlag(viz.ABS_PARENT)
        #grabLink.swapPos(-3, 2, -1)
        grabLink.postEuler([-90, 0, 0], target=viz.LINK_POS_OP)
        #grabLink.postTrans([0.3, 0, 0.1], target = viz.LINK_POS_OP)
        #just the following two statements will work alone, but not very accurate
        #grabLink.postEuler([-90, 0, 0], target = viz.LINK_POS_OP)
        #grabLink.setOffset([1, 0, 1.5])
        # ghostAvatar.getBone('Bip01 R Forearm').getPosition(viz.ABS_GLOBAL))
    else:
        thirdAppendageGrabbed = False
        if grabLink:
            grabLink.remove()
            grabLink = None
            #thirdAppendage.setMatrix(matrixOfThirdAppendage)
            tahLink.enable()
            thirdAppendage.setPosition(posOfThirdAppendage)
Example 15: __init__
def __init__(self):
    viz.mouse.setVisible(viz.OFF)
    #Activate NVIS HMD
    nvis.nvisorSX111()
    #nvis.nvisorSX60()
    viz.cursor(viz.OFF)
    #isense = viz.add('intersense.dls')
    vrpn = viz.add('vrpn7.dle')
    view = viz.MainView
    self.markers = []
    headMarker = vrpn.addTracker('[email protected]' + PPT_HOSTNAME, 0)
    self.markers.append(headMarker)
    self.markers.append(vrpn.addTracker('[email protected]' + PPT_HOSTNAME, 1))
    self.markers.append(vrpn.addTracker('[email protected]' + PPT_HOSTNAME, 2))
    self.markers.append(vrpn.addTracker('[email protected]' + PPT_HOSTNAME, 3))
    self.markers.append(vrpn.addTracker('[email protected]' + PPT_HOSTNAME, 4))
    filter = viz.add("filter.dle")
    headMarker_filter = filter.average(headMarker, samples=7)
    headPos = viz.link(headMarker_filter, view, priority=0)
    headPos.setOffset(DEFAULT_OFFSET)
    self.posLink = headPos
    #self.posLink.postScale(DEFAULT_SCALE)
    self.headMarker = headMarker