import bpy
import math
import mathutils
import os
import sys
import numpy as np

# Launch with: ./blender-2.74-linux-glibc211-x86_64/blender --python ./Swirski-EyeModel.py -b

# ****** CREATE DIRECTORY WHERE TO SAVE IMAGES ***********
img_dir = '/tmp/images/'
if not os.path.exists(img_dir):
    os.makedirs(img_dir)

bpy.ops.wm.open_mainfile(filepath="/media/HD500GO/blender/Swirski-EyeModel.blend")

scene = bpy.context.scene
armature = bpy.data.objects['Armature Head']
camera_obj = bpy.data.objects['Camera']
camera = bpy.data.cameras['Camera']

eyeL = bpy.data.objects['eye.L']
pupilGroup = eyeL.vertex_groups['eyepulpex.L']
pupilVertices = [v for v in eyeL.data.vertices
                 if pupilGroup.index in [g.group for g in v.groups]]
# Base pupil size: largest distance between any two pupil-rim vertices, scaled by the mesh Y scale
pupil_base_radius = max((v1.co - v2.co).length
                        for v1 in pupilVertices
                        for v2 in pupilVertices) * eyeL.scale[1]

eyeLbone = armature.pose.bones['def_eye.L']
pupilLbone = armature.pose.bones['eyepulpex.L']


def strVec(vec):
    return "({},{},{})".format(vec[0], vec[1], vec[2])


datafilepath = os.path.join(os.path.dirname(bpy.data.filepath), "render_eye_data.txt")

# Switch on compositing nodes
scene.use_nodes = True
tree = scene.node_tree
links = tree.links

# Clear default nodes
for n in tree.nodes:
    tree.nodes.remove(n)

# Create input render layer node
rl = tree.nodes.new('CompositorNodeRLayers')
rl.location = 185, 285

# Create viewer output node
v = tree.nodes.new('CompositorNodeViewer')
v.location = 750, 210
v.use_alpha = False

# Create file output node
of_c_node = tree.nodes.new('CompositorNodeOutputFile')
of_c_node.location = 600, 200
#of_node.base_path =
of_c_node.format.file_format = 'PNG'

# Links
links.new(rl.outputs[0], v.inputs[0])  # link Image output to Viewer input
links.new(rl.outputs[0], of_c_node.inputs[0])

# Define path where to save images
base_path = "/tmp/images"  # "./images_test_png"

with open(datafilepath, "w") as datafile:
    # for frame in range(scene.frame_end + 1):
    for frame in range(2):
        scene.frame_set(frame)

        # Modify the eye gaze target position
        print('Jessica and Bertrand passed through here')  # debug marker
        armature.pose.bones['eyetargetparent'].location[0] = -0.113456

        # Camera pose with Blender's -Z-forward convention flipped to +Z forward
        camera_mat = camera_obj.matrix_world * mathutils.Matrix(
            [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        camera_invmat = camera_mat.inverted()

        armature_mat = armature.matrix_world
        # Eye bone head/tail in world space, then in camera space
        head_world = armature_mat * eyeLbone.head
        tail_world = armature_mat * eyeLbone.tail
        head_cam = camera_invmat * head_world
        tail_cam = camera_invmat * tail_world

        eye_centre = head_cam
        eye_radius = eyeLbone.bone.length
        pupil_gaze = (tail_cam - head_cam).normalized()
        pupil_radius = pupilLbone.scale[0] * pupil_base_radius
        pupil_centre = eye_centre + eye_radius * pupil_gaze

        # framestr = "{} | head_world: {} | tail_world: {} | vec_world: {} | head_cam: {} | tail_cam: {} | vec_cam {}".format(
        #     frame, *[strVec(x) for x in [head_world, tail_world, vec_world, head_cam, tail_cam, vec_cam]])
        framestr = "{} {} {} {} {} {}".format(
            frame,
            strVec(eye_centre), eye_radius,
            strVec(pupil_centre), strVec(pupil_gaze), pupil_radius
        )
        print(framestr)
        print(framestr, file=datafile)

        # ************* RENDER AND SAVE IMAGES ***********
        # Define image resolution for rendering
        scene.render.resolution_x = 250
        scene.render.resolution_y = 250
        # Resolution percentage: has to be 100% to render at the full resolution defined above
        scene.render.resolution_percentage = 100
        # Select which camera will be used for rendering
        # (if more than one camera is defined, the rendering has to be repeated for each camera)
        scene.camera = bpy.data.objects["Camera"]

        # Get the Viewer Node pixels directly
        # pixels = bpy.data.images['Viewer Node'].pixels
        # print(len(pixels))  # size is always width * height * 4 (RGBA)
        # Copy buffer to a numpy array for faster manipulation
        # arr = np.array(pixels[:])
        # print('one pixel \n', arr[100:104])

        # Set the File Output node directory where images of this frame are saved
        str4 = base_path + "/image"
        of_c_node.base_path = str4 + "000"
        # out_z_node.base_path = str4 + "_Z_" + "000"

        # Render
        bpy.ops.render.render()  # write_still=True

# ************* EXIT BLENDER ***********
sys.exit(0)
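
# --- Usage sketch (assumption: run as a separate script, outside Blender) ---
# A minimal example of reading back one line of render_eye_data.txt, based on the
# format string used above: "frame (x,y,z) eye_radius (x,y,z) (x,y,z) pupil_radius",
# with no spaces inside the parenthesised vectors. The helper names parse_vec and
# parse_line are illustrative and not part of the original script.

def parse_vec(token):
    # "(x,y,z)" -> tuple of floats
    return tuple(float(c) for c in token.strip('()').split(','))

def parse_line(line):
    frame, eye_c, eye_r, pupil_c, gaze, pupil_r = line.split()
    return {
        'frame': int(frame),
        'eye_centre': parse_vec(eye_c),
        'eye_radius': float(eye_r),
        'pupil_centre': parse_vec(pupil_c),
        'pupil_gaze': parse_vec(gaze),
        'pupil_radius': float(pupil_r),
    }

# Example:
# with open('render_eye_data.txt') as f:
#     records = [parse_line(l) for l in f if l.strip()]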