Monte Carlo Ray Tracing

Ben Sattelberg, October 29, 2019

In this notebook is an implementation of a recursive Monte Carlo ray tracer.

In [1]:
import numpy as np
import multiprocessing
import time
import itertools

from IPython import display

from sage.repl.image import Image
from sage.plot.plot3d.shapes import *
from sage.plot.plot3d.base import SHOW_DEFAULTS
SHOW_DEFAULTS['aspect_ratio'] = (1,1,1)
SHOW_DEFAULTS['frame_aspect_ratio'] = (1,1,1)
SHOW_DEFAULTS['perspective_depth'] = false

Some helper methods are useful for later calculations: vectors need to be normalized to unit length very frequently, a random point sampled uniformly from inside the unit sphere makes the Lambertian bouncing easier to implement, and a function for computing specular reflection directions makes mirror-like objects easier to work with.

In [2]:
def make_unit(x) :
    return x / np.linalg.norm(x)

def random_in_unit_sphere():
    while True:
        p = np.random.uniform(-1, 1, size=3)
        if np.dot(p, p) < 1:
            return p
        
def reflect(direction, normal):
    return direction - 2*np.dot(direction, normal)*normal
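
As a quick sanity check of these helpers (my own sketch, not part of the original notebook), a direction hitting a horizontal surface at 45 degrees should reflect back out at 45 degrees, and the rejection sampler should always return a point inside the unit sphere.

# Illustration only: exercise the helper functions
d = make_unit(np.array([1.0, -1.0, 0.0]))
n = np.array([0.0, 1.0, 0.0])
print reflect(d, n)                                # approximately [0.707, 0.707, 0]
print np.linalg.norm(random_in_unit_sphere()) < 1  # always True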

The camera is almost exactly the same as in previous notebooks - the only difference is that the pixel_ray functionality has been moved into it.

In [3]:
class Camera :
    def __init__(self, eye, look, up, bnds, near, width, height) :
        self.eye   = np.array(eye)
        self.look  = np.array(look)
        self.up    = np.array(up)
        self.umin  = bnds[0]
        self.umax  = bnds[1]
        self.vmin  = bnds[2]
        self.vmax  = bnds[3]
        self.near  = near
        self.width = width
        self.height = height
        self.setupUVW()
        
    def setupUVW(self) :
        Wv = make_unit(self.eye - self.look)   
        Uv = make_unit(np.cross(self.up, Wv))    
        self.V = np.cross(Wv,Uv)
        self.U = Uv
        self.W = Wv
        
    def pixel_ray(self, i, j) :
        # Map pixel indices (i, j) to image-plane coordinates (px, py);
        # j increases downward, so py runs from vmax down to vmin
        px = RR(i/(self.width  - 1)*(self.umax - self.umin) + self.umin)
        py = RR(j/(self.height - 1)*(self.vmin - self.vmax) + self.vmax)
        # Point on the image plane for this pixel, and the ray from the eye through it
        Lv = self.eye + (self.near * self.W) + (px * self.U) + (py * self.V)
        Dv = Lv - self.eye
        return Ray(Lv, Dv)
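
As a quick illustration (mine, not from the original notebook), the corner pixels should map to the corners of the viewing window given in bnds.

# Illustration only: pixel (0, 0) corresponds to (umin, vmax) on the image
# plane and the opposite corner to (umax, vmin)
test_cam = Camera((-4, 0.25, 0), (1, 0, 0), (0, 1, 0), (-4, 4, -4, 4), -4, 64, 64)
print test_cam.pixel_ray(0, 0).origin
print test_cam.pixel_ray(test_cam.width - 1, test_cam.height - 1).origin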

The ray class is the same as in previous notebooks.

In [4]:
class Ray :
    def __init__(self, origin, direction) :
        self.origin = np.array(origin)
        self.direction = make_unit(np.array(direction))

The sphere class is equivalent to the globe class in previous notebooks. The ray intersection functionality has been moved here for ease of use, and now returns a dictionary hit record.

In [5]:
class Sphere :
    def __init__(self, center, radius, material) :
        self.center = np.array(center)       # Sphere center as a vector
        self.radius = float(radius)          # Sphere radius
        self.material = material             # Index for sphere's material
        
    def get_normal(self, point):
        return make_unit(point - self.center)
    
    def ray_intersect(self, ray):
        # Geometric test: project the vector to the sphere center onto the ray
        # direction; the ray misses if the closest approach is farther than the radius
        toCenter = np.array(self.center - ray.origin)
        v = np.dot(toCenter, ray.direction)
        csq = np.dot(toCenter, toCenter)
        disc = self.radius^2 - (csq - v^2)
        
        if (disc > 0) :
            tval = v - sqrt(disc)
            # The small epsilon keeps a bounced ray from re-hitting the surface it just left
            if tval > 0.00001:
                hit_point = ray.origin + tval*ray.direction
                normal = make_unit(hit_point - self.center)
                return {'t':tval, 'point':hit_point, 'normal':normal, 'material':self.material}
        return None
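
For reference (a sketch of my own, not part of the original notebook), the geometric test above agrees with solving the quadratic |origin + t*direction - center|^2 = radius^2 directly:

# Illustration only: compare the geometric intersection with the standard
# quadratic solution for the same ray and sphere
s = Sphere((0, 0, 0), 1.0, None)
r = Ray((-3, 0.2, 0), (1, 0, 0))

oc = r.origin - s.center
b = 2*np.dot(oc, r.direction)
c = np.dot(oc, oc) - s.radius^2
t_quadratic = (-b - sqrt(b^2 - 4*c))/2

print s.ray_intersect(r)['t'], t_quadratic  # both approximately 2.02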

Materials all inherit from a main "Material" class. Each material provides a scatter function that returns the bounced ray and the albedo, and an emitted function that returns the amount of light the object emits.

In [6]:
class Material: 
    def scatter(self, ray, hit_record):
        return None, None
    def emitted(self):
        return np.array([0.0, 0.0, 0.0])
    
class Light(Material):
    def __init__(self, emittance):
        self.emittance = np.array(emittance)
    def emitted(self):
        return self.emittance
        
class Lambertian(Material):
    def __init__(self, albedo):
        self.albedo = np.array(albedo)
    def scatter(self, ray, hit_record):
        direction = make_unit(hit_record['normal'] + random_in_unit_sphere())
        scattered = Ray(hit_record['point'], direction)
        return scattered, self.albedo
        
class Mirror(Material):
    def __init__(self, albedo):
        self.albedo = np.array(albedo)
    def scatter(self, ray, hit_record):
        reflected = reflect(ray.direction, hit_record['normal'])
        scattered = Ray(hit_record['point'], reflected)
        return scattered, self.albedo
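
A small usage sketch (my own, using a hand-built hit record) to show the contract: a Lambertian returns a scattered ray plus its albedo, while a pure light scatters nothing and only emits.

# Illustration only: exercise the Material interface with a hand-built hit record
lam = Lambertian((0.3, 0.3, 0.9))
fake_hit = {'t': 1.0, 'point': np.array([0.0, 0.0, 0.0]),
            'normal': np.array([0.0, 1.0, 0.0]), 'material': lam}
incoming = Ray((0, 1, -1), (0, -1, 1))

bounced, albedo = lam.scatter(incoming, fake_hit)
print bounced.direction, albedo          # a random direction in the upper hemisphere

light = Light((3.0, 3.0, 3.0))
print light.scatter(incoming, fake_hit)  # (None, None): lights do not scatter
print light.emitted()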

Set up the scene.

In [7]:
# The camera is specified by:
#    eye, look, up, bnds, near, width, height
# There are two cameras below. Be careful: the second (higher resolution) takes many hours.
cam = Camera((-4,0.25,0), (1,0,0),(0,1,0),(-4,4,-4,4),-4,64,64)
#cam = Camera((-4,0.25,0), (1,0,0),(0,1,0),(-4,4,-4,4),-4,256,256)

mats = [Light((3.0, 3.0, 3.0)),
        Lambertian((0.9, 0.9, 0.9)), # Front, top, and bottom spheres
        Lambertian((0.3, 0.9, 0.3)), # Left sphere
        Lambertian((0.9, 0.3, 0.3)), # Right sphere
        Lambertian((0.3, 0.9, 0.3)), # Behind sphere
        Lambertian((0.3, 0.3, 0.9)), # Lambertian sphere
        Mirror((0.9, 0.9, 0.9))] # Metal sphere
        
objects = []
size = 1e10

objects.append(Sphere((0, 600+5-0.003, 0), 600, mats[0])) # Light

objects.append(Sphere((size+5, 0, 0), size, mats[1])) # Front sphere
objects.append(Sphere((0, size+5, 0), size, mats[1])) # Top sphere
objects.append(Sphere((0, -size-5, 0), size, mats[1])) # Bottom sphere

objects.append(Sphere((0, 0, -size-5), size, mats[2])) # Left sphere
objects.append(Sphere((0, 0, size+5), size, mats[3])) # Right sphere

objects.append(Sphere((3, -3.75, -2), 1.25, mats[6])) # Metal sphere

objects.append(Sphere((2.5, -3.75, 2), 1.25, mats[5])) # Lambertian sphere

max_depth = 50

This is a simple function to determine the closest object that a ray hits, if any.

In [8]:
def check_collisions(objects, ray):
    closest = None
    for obj in objects:
        hit = obj.ray_intersect(ray)
        if hit:
            if closest is None or hit['t'] < closest['t']:
                closest = hit
    return closest

This function is analogous to the one in previous notebooks, without the calculations for explicit light sampling. A Russian-roulette optimization is also used for efficiency: paths whose accumulated albedo is low are randomly terminated, and the survivors are boosted to keep the estimate unbiased (note that this converges over many samples, but can produce noisy results for low sample counts).

In [9]:
def color(ray, objects, depth, curAlbedo):
    hit = check_collisions(objects, ray)
    if hit:
        scatter, albedo = hit['material'].scatter(ray, hit)
        emitted = hit['material'].emitted()
        if depth < max_depth and scatter:
            curAlbedo = np.multiply(albedo, curAlbedo)
            
            # Russian roulette: randomly terminate paths with low accumulated albedo
            if np.max(curAlbedo) < np.random.uniform(0, 1):
                return np.array([0.0, 0.0, 0.0])
            # Boost surviving rays to prevent bias
            curAlbedo = curAlbedo/np.max(curAlbedo)
            
            return np.multiply(curAlbedo, emitted) + color(scatter, objects, depth+1, curAlbedo)
        else:
            return np.multiply(curAlbedo, emitted)
    else:
        return np.multiply(curAlbedo, np.array([0.0, 0.0, 0.0]))
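
To see why the boost keeps the estimate unbiased (a standalone sketch, not part of the renderer): terminating a contribution with probability 1 - p and dividing the survivors by p leaves the expected value unchanged.

# Illustration only: Russian roulette preserves the expected value
p = 0.3        # survival probability (max(curAlbedo) in the renderer above)
contrib = 2.0  # the contribution a surviving path would carry
estimates = [contrib/p if np.random.uniform(0, 1) < p else 0.0 for _ in range(100000)]
print np.mean(estimates), contrib  # the mean should be close to contrib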

This function exists purely so that Python's multiprocessing can be used, by mapping the (i, j) coordinates of each pixel through this helper to a rendered color.

In [10]:
def helper(coords):
    i, j = coords
    ray = cam.pixel_ray(i, j)
    rgb = color(ray, objects, 0, np.array([1.0, 1.0, 1.0]))
    return (i, j), rgb

Do the actual ray tracing.

In [11]:
vals = np.zeros((cam.width, cam.height, 3))
img = Image('RGB', (cam.width, cam.height), 'black')
pix = img.pixels()

startTime = time.time()

numSamples      = 0
maxSamples      = 1000
displayInterval = 10
writeInterval   = 50


# Python's multiprocessing does not respond well to interrupts,
# especially when in a notebook
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())

while numSamples < maxSamples:
    numSamples += 1
    
    # Use this for multiprocessing
    pixelRenders = pool.map_async(helper, itertools.product(range(cam.width), range(cam.height))).get(999999)
    # Use this for a single core
    #pixelRenders = map(helper, itertools.product(range(cam.width), range(cam.height)))
    
    for elem in pixelRenders:
        i, j = elem[0]
        rgb = elem[1]
        vals[i,j, :] += rgb
        
    for i in range(cam.width):
        for j in range(cam.height):
            # The sqrt here is for gamma encoding (gamma = 2)
            pix[i, j] = tuple(map(lambda x: ZZ(max(0, min(255, round(255.0 * np.sqrt(x/numSamples))))), vals[i, j, :]))
    if mod(numSamples,displayInterval) == 0 :        
        display.clear_output(wait=True)
        print 'Number of samples: ' +  str(int(numSamples))
        print 'Time taken: ' + str(int(time.time() - startTime)) + ', Per sample: ' + str(int((time.time() - startTime)/numSamples))
        print 'Number of cores: ' + str(multiprocessing.cpu_count())
        img.show()
    if mod(numSamples,writeInterval) == 0 :
        img.save("cs410lec17n02.png")
Number of samples: 1000
Time taken: 2307, Per sample: 2
Number of cores: 8