How to use the render_profile method in prospector

Best Python code snippet using prospector_python

render.py

Source: render.py (GitHub)

#!/usr/bin/env python
import argparse
import collections
import math
import multiprocessing
from PIL import Image
from random import random
import time

""" Things you need to know to render a scene """
Render_Profile = collections.namedtuple('Render_Profile',
                                        'name width height '
                                        'rays_per_pixel max_bounce')


class V3(object):
    """ Element of a 3 dimensional vector space """
    def __init__(self, x=0, y=0, z=0):
        try:
            self.x = float(x)
            self.y = float(y)
            self.z = float(z)
        except Exception as e:
            print e
            print x, y, z

    def __str__(self):
        return '<V3>({0.x:.3}, {0.y:.3}, {0.z:.3})'.format(self)

    def __mul__(self, a):
        ret = V3()
        ret.x = self.x*a
        ret.y = self.y*a
        ret.z = self.z*a
        return ret

    def __rmul__(self, a):
        return self*a

    def __add__(self, a):
        ret = V3()
        ret.x = self.x+a.x
        ret.y = self.y+a.y
        ret.z = self.z+a.z
        return ret

    def __sub__(self, a):
        ret = V3()
        ret.x = self.x-a.x
        ret.y = self.y-a.y
        ret.z = self.z-a.z
        return ret

    def tuple(self):
        return (self.x, self.y, self.z)


def lerp(a, t, b):
    """ linear interpolation
    0 <= t <= 1
    return a value between a and b proportional to t
    """
    return (1.0 - t)*a + t*b


def Hadamard(a, b):
    """ Hadamard product
    return the entrywise product of two inputs
    """
    return V3(a.x*b.x, a.y*b.y, a.z*b.z)


def Inner(a, b):
    """ The inner/dot product """
    return a.x*b.x + a.y*b.y + a.z*b.z


def LengthSq(v3):
    """ The square of the length of the vector """
    return Inner(v3, v3)


def NoZ(v3, e=0.0001):
    """ Normalize or Zero
    Normalize the vector if it is big enough, otherwise return the 0 vector
    """
    ret = V3()
    lensq = LengthSq(v3)
    if lensq > e**2:
        ret = v3 * (1.0 / math.sqrt(lensq))
    return ret


def Cross(a, b):
    """ The cross product (or vector product) "a x b" """
    ret = V3()
    ret.x = a.y*b.z - a.z*b.y
    ret.y = a.z*b.x - a.x*b.z
    ret.z = a.x*b.y - a.y*b.x
    return ret


def Linear1ToRGB255(c):
    """ Map a V3(0..1) to an int V3(0..255) """
    ret = V3()
    ret.x = int(255*math.sqrt(c.x))
    ret.y = int(255*math.sqrt(c.y))
    ret.z = int(255*math.sqrt(c.z))
    return ret


def Gamma(linearV3):
    """ "gamma" correction for a linear V3 """
    gamma = V3(*[e*12.92 if e < 0.0031308 else 1.055*e**(1.0/2.4)-0.055
                 for e in linearV3.tuple()])
    return gamma


class World(object):
    """ All of the objects and materials in a scene """
    def __init__(self):
        self.default_material = Material()
        self.planes = list()
        self.spheres = list()


class Sphere(object):
    """ Round, in 3 dimensions """
    def __init__(self, v3, radius, material):
        self.center = v3
        self.radius = radius
        self.material = material


class Plane(object):
    """ Flat, in 2 dimensions """
    def __init__(self, n, d, material):
        self.n = n
        self.d = d
        self.material = material


class Material(object):
    """ The thing things are made of """
    def __init__(self, emit_color=V3(), refl_color=V3(), scatter=0.0):
        self.emit_color = emit_color
        self.refl_color = refl_color
        self.scatter = scatter


def cast_ray(world, render_profile, ray_origin, ray_dir):
    """ Cast a ray into the world """
    result = V3(0, 0, 0)
    attenuation = V3(1, 1, 1)
    min_hit_distance = 0.001
    tolerance = 0.0001
    for _ in xrange(render_profile.max_bounce):
        hit_dist = 10**100
        hit_material = None
        next_normal = None
        for plane in world.planes:
            denom = Inner(plane.n, ray_dir)
            if abs(denom) > tolerance:
                t = (- plane.d - Inner(plane.n, ray_origin)) / denom
                if 0 < t < hit_dist:
                    hit_dist = t
                    hit_material = plane.material
                    next_normal = plane.n
        for sphere in world.spheres:
            sphere_origin_translate = ray_origin - sphere.center
            a = Inner(ray_dir, ray_dir)
            b = 2.0*Inner(ray_dir, sphere_origin_translate)
            c = Inner(sphere_origin_translate, sphere_origin_translate) \
                - sphere.radius**2
            denom = 2*a
            sqrd = max(0, b*b-4*a*c)
            root_term = math.sqrt(sqrd)
            if root_term > tolerance:
                pos = (-b + root_term) / denom
                neg = (-b - root_term) / denom
                t = pos
                if min_hit_distance < neg < pos:
                    t = neg
                if min_hit_distance < t < hit_dist:
                    hit_dist = t
                    hit_material = sphere.material
                    next_normal = NoZ(t*ray_dir + sphere_origin_translate)
        if hit_material is not None:
            result += Hadamard(attenuation, hit_material.emit_color)
            cos_atten = Inner(ray_dir*-1, next_normal)
            cos_atten = max(0, cos_atten)
            attenuation = Hadamard(attenuation, cos_atten *
                                   hit_material.refl_color)
            ray_origin += hit_dist * ray_dir
            pure_bounce = ray_dir - 2*Inner(ray_dir, next_normal)*next_normal
            random_bounce = NoZ(next_normal + V3(random()*2-1,
                                                 random()*2-1,
                                                 random()*2-1))
            ray_dir = NoZ(lerp(random_bounce,
                               hit_material.scatter,
                               pure_bounce))
        else:
            result += Hadamard(attenuation, world.default_material.emit_color)
            break
    return result


Work_Order = collections.namedtuple('Work_Order', 'world render_profile '
                                    'x_min_px x_max_px y_min_px y_max_px')


def render_worker(idnum, in_queue, out_queue):
    """ Process the given work queue
    Grab an item from the work queue and render the portion of the image
    """
    while not in_queue.empty():
        try:
            work_order = in_queue.get_nowait()
        except Exception as e:
            print idnum, "Bad get in in_queue", e
            time.sleep(random())
            continue
        render_profile = work_order.render_profile
        img = Image.new('RGB', (work_order.x_max_px-work_order.x_min_px,
                                work_order.y_max_px-work_order.y_min_px),
                        "blue")
        camera_pos = V3(0, -10, 1)
        camera_z = NoZ(camera_pos)
        camera_x = NoZ(Cross(camera_z, V3(0, 0, 1)))
        camera_y = NoZ(Cross(camera_z, camera_x))
        image_width = render_profile.width
        image_height = render_profile.height
        film_dist = 1.0
        film_w = 1.0
        film_h = 1.0
        # Match the film aspect ratio to the image aspect ratio
        if image_width > image_height:
            film_h = film_w * image_height/image_width
        else:
            film_w = film_h * image_width/image_height
        film_center = camera_pos - film_dist*camera_z
        pix_width = 1.0 / image_width
        pix_height = 1.0 / image_height
        pixels = img.load()
        for x in xrange(work_order.x_min_px, work_order.x_max_px):
            film_x = -1.0+2.0*x/image_width
            for y in range(work_order.y_min_px, work_order.y_max_px):
                film_y = -1.0+2.0*y/image_height
                color = V3()
                # Cast multiple rays and composite them equally
                fraction = 1.0/render_profile.rays_per_pixel
                for _ in xrange(render_profile.rays_per_pixel):
                    # add a < 1 px jitter to each ray
                    off_x = film_x + (random()*2-1)*pix_width/2.0
                    off_y = film_y + (random()*2-1)*pix_height/2.0
                    film_p = film_center - off_x*film_w/2.0*camera_x + \
                        off_y * film_h/2.0 * camera_y
                    ray_origin = camera_pos
                    ray_dir = NoZ(film_p - camera_pos)
                    result = cast_ray(work_order.world, render_profile,
                                      ray_origin, ray_dir)
                    color += result*fraction
                pixel = Gamma(color)
                pixel = Linear1ToRGB255(pixel)
                try:
                    pixels[x-work_order.x_min_px,
                           y-work_order.y_min_px] = pixel.tuple()
                except Exception as e:
                    print e
        out_queue.put((work_order, img))


def load_world():
    """ Return a populated world object """
    world = World()
    p = Plane(V3(0, 0, 1), 0, Material(V3(0, 0, 0), V3(0.5, 0.5, 0.5), 0.0))
    world.planes.append(p)
    world.default_material = Material(V3(0.3, 0.4, 0.5), V3(0, 0, 0), 0.0)
    world.spheres.append(Sphere(V3(0, 0, 0), 1.0,
                                Material(V3(0, 0, 0), V3(0.7, 0.5, 0.3), 0.0)))
    world.spheres.append(Sphere(V3(3, -2, 0), 1.0,
                                Material(V3(2.0, 0.0, 0.0), V3(0, 0, 0), 0.0)))
    world.spheres.append(Sphere(V3(-2, -1, 2), 1.0,
                                Material(V3(0, 0, 0), V3(0.2, 0.8, 0.2), 0.7)))
    world.spheres.append(Sphere(V3(1, -1, 3), 1.0,
                                Material(V3(0, 0, 0), V3(0.4, 0.8, 0.9), 0.85)))
    world.spheres.append(Sphere(V3(-2, 3, 0), 2.0,
                                Material(V3(0, 0, 0),
                                         V3(0.95, 0.95, 0.95), 1.0)))
    return world


def render(profile, thread_count):
    """ Use the given render profile and thread count to render """
    img = Image.new('RGB', (profile.width, profile.height), "black")
    world = load_world()
    start_time = time.time()
    # Set the tile width to be a power of two
    tile_width = 1
    while 2*tile_width <= img.size[0]/math.sqrt(thread_count):
        tile_width *= 2
    tile_height = tile_width
    tile_count_x = (img.size[0] + tile_width - 1) / tile_width
    tile_count_y = (img.size[1] + tile_height - 1) / tile_height
    print "Chunking: %d threads with %d %dx%d tiles" % \
        (thread_count, tile_count_x*tile_count_y, tile_width, tile_height)
    job_queue = multiprocessing.Queue()
    image_queue = multiprocessing.Queue()
    for tile_x in xrange(tile_count_x):
        x_min = tile_x*tile_width
        x_max = min(img.size[0], x_min + tile_width)
        for tile_y in xrange(tile_count_y):
            y_min = tile_y*tile_height
            y_max = min(img.size[1], y_min + tile_height)
            work_order = Work_Order(world, profile, x_min, x_max, y_min, y_max)
            job_queue.put(work_order)
    procs = list()
    for n in xrange(thread_count):
        proc = multiprocessing.Process(target=render_worker,
                                       args=(n, job_queue, image_queue))
        proc.start()
        procs.append(proc)
    for x in xrange(tile_count_x):
        for y in xrange(tile_count_y):
            work_order, tile = image_queue.get()
            img.paste(tile, (work_order.x_min_px, work_order.y_min_px))
    end_time = time.time()
    casting_time = (end_time - start_time)*1000
    print "Raycasting time: %dms" % casting_time
    return img


def main():
    parser = argparse.ArgumentParser(description='A Simple Ray Tracer')
    parser.add_argument('-t', '--threads', type=int,
                        default=multiprocessing.cpu_count(), nargs='?',
                        help='Number of threads to use')
    parser.add_argument('-y', '--height', type=int, default=1080/4, nargs='?',
                        help='Image height in pixels')
    parser.add_argument('-x', '--width', type=int, default=1920/4, nargs='?',
                        help='Image width in pixels')
    parser.add_argument('-rpp', '--rays_per_pixel', type=int, default=1,
                        nargs='?', help='Rays to cast per pixel')
    parser.add_argument('-mb', '--max_bounce', type=int, default=4, nargs='?',
                        help='Maximum number of ray bounces')
    args = parser.parse_args()
    profile = Render_Profile('test', args.width, args.height,
                             args.rays_per_pixel, args.max_bounce)
    print "Rendering...",
    img = render(profile, args.threads)
    print "done."
    img.save('output.png')
    img.show()


if __name__ == "__main__":
    ...
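The snippet above only builds a Render_Profile inside main(), so the short driver below shows the same flow when the ray tracer is imported as a module instead of run from the command line. This is a minimal sketch under the assumption that the code above is saved as render.py on the import path; the profile name and the 160x90 / 4 rays / 8 bounces values are arbitrary choices, not taken from the original project.

# preview.py - hypothetical driver for the render.py snippet above
import multiprocessing

from render import Render_Profile, render

if __name__ == '__main__':
    # Fields are: name, width, height, rays_per_pixel, max_bounce
    profile = Render_Profile('preview', 160, 90, 4, 8)
    # One worker process per CPU core; render() handles the tiling internally.
    img = render(profile, multiprocessing.cpu_count())
    img.save('preview.png')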

profile_urls.py

Source: profile_urls.py (GitHub)

from django.urls import path

from ..views.profile_views import render_profile

urlpatterns = [
    # View the profile
    path('', render_profile.profile_view, name='profile'),
    path('loading', render_profile.loading, name='loading'),
    path('new_notif/', render_profile.new_notification),
    # Remove an invitation
    path('notification/<int:notification_id>/remove/', render_profile.remove_invite),
    # Mark notifications as viewed
    path('viewing/<int:notification_id>', render_profile.mark_notification_as_viewed),
    ...
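This URLconf imports render_profile from ..views.profile_views, a module that is not shown on this page. As a rough sketch of what that module could contain, each name referenced by path() just has to resolve to a view callable; the function bodies, template paths, and response shapes below are assumptions, only the view names come from the URLconf above.

# views/profile_views/render_profile.py - hypothetical sketch of the imported module
from django.http import JsonResponse
from django.shortcuts import redirect, render


def profile_view(request):
    # View the profile of the logged-in user.
    return render(request, 'profile/profile.html', {'user': request.user})


def loading(request):
    # Placeholder page shown while the profile data loads.
    return render(request, 'profile/loading.html')


def new_notification(request):
    # Return unread notifications as JSON for the client to poll.
    return JsonResponse({'notifications': []})


def remove_invite(request, notification_id):
    # Remove the invitation behind the given notification, then return to the profile.
    return redirect('profile')


def mark_notification_as_viewed(request, notification_id):
    # Mark a single notification as viewed, then return to the profile.
    return redirect('profile')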

setup.py

Source: setup.py (GitHub)

...
src_dir = etc.src()
if not src_dir.exists():
    os.makedirs(src_dir)
subprocess.run(["pwsh", "-Command", "\"Install-Module posh-git -Force\""])


def render_profile(directory, name="Profile.ps1"):
    toolbox_dir = src_dir / "toolbox"
    print("Rendering {} to {}...".format(name, directory))
    etc.render_mustache("profile.mustache", directory, name, toolbox_dir=toolbox_dir.as_posix())


if etc.is_windows():
    render_profile(etc.home() / "Documents" / "WindowsPowerShell")
    render_profile(etc.home() / "Documents" / "PowerShell")
else:
    render_profile(etc.home() / ".config" / "powershell", "profile.ps1")
...
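Here, render_profile delegates the actual templating to a project-local etc.render_mustache helper that the excerpt does not include. The sketch below shows one way such a helper could be written with the chevron mustache library; the helper signature is inferred from the call above, and the template contents and paths are assumptions rather than part of the original repo.

# render_mustache.py - hypothetical stand-in for the etc.render_mustache helper
from pathlib import Path

import chevron  # third-party mustache renderer: pip install chevron


def render_mustache(template_name, directory, name, **context):
    # Read the mustache template (e.g. profile.mustache) from the current directory.
    template = Path(template_name).read_text()
    # Substitute placeholders such as {{toolbox_dir}} with the values passed in.
    rendered = chevron.render(template, context)
    # Write the rendered PowerShell profile, creating the target directory if needed.
    target = Path(directory)
    target.mkdir(parents=True, exist_ok=True)
    (target / name).write_text(rendered)


if __name__ == "__main__":
    render_mustache("profile.mustache",
                    Path.home() / ".config" / "powershell",
                    "profile.ps1",
                    toolbox_dir="~/src/toolbox")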

Blogs

Check out the latest blogs from LambdaTest on this topic:

Test Optimization for Continuous Integration

“Test frequently and early.” If you’ve been following my testing agenda, you’re probably sick of hearing me repeat that. However, it makes sense: if your tests detect an issue soon after it occurs, it will be easier to resolve. This is one of the guiding principles that makes continuous integration such an effective method. I’ve encountered several teams who have a lot of automated tests but don’t use them as part of a continuous integration approach. There are frequently various reasons why the team believes these tests cannot be used with continuous integration. Perhaps the tests take too long to run, or they are not dependable enough to provide correct results on their own, necessitating human interpretation.

A Complete Guide To CSS Houdini

As a developer, checking the cross browser compatibility of your CSS properties is of utmost importance when building your website. I have often found myself excited to use a CSS feature only to discover that it’s still not supported on all browsers. Even if it is supported, the feature might be experimental and not work consistently across all browsers. Ask any front-end developer about using a CSS feature whose support is still in the experimental phase in most prominent web browsers.

Different Ways To Style CSS Box Shadow Effects

Have you ever visited a website that only has plain text and images? Most probably, no. That’s because such websites barely exist anymore. But there was a time when websites only had plain text and images with almost no styling. For the longest time, websites did not focus on user experience. For instance, this is how eBay’s homepage looked in 1999.

Fluent Interface Design Pattern in Automation Testing

Recently, I was going through some of the design patterns in Java by reading the book Head First Design Patterns by Eric Freeman, Elisabeth Robson, Bert Bates, and Kathy Sierra.

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites to run your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run prospector automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

