Deploying Node apps on Amazon EC2 Micro with Stunnel and HAProxy

Recently, @cimm and I deployed a beta version of JSLogger. We encountered some unexpected issues on the IT (dark) side that delayed our launch. Load balancers and SSL support can be a pain in the as* if you don’t deal with them on a daily basis.

The challenge

JSLogger was built on ExpressJS, a wonderful NodeJS framework. Mongoose wraps the database calls to MongoDB where the auth user data and the log entries are stored.

We used to store the app session data in a RedisStore but we recently switched to MongoStore as it makes life much easier when struggling with one type of database only.

Our app Node server looks like this:

/**
 * JSLogger manager
 *
 * app.js
 *
 * Boots the Express server for the JSLogger web manager: session-backed
 * auth, asset pipeline and static content, listening on port 5000.
 */

var express = require('express');
// Fix: the missing `var` here leaked mongooseAuth onto the global object.
var mongooseAuth = require('mongoose-auth');
var everyauth = require('./node_modules/mongoose-auth/node_modules/everyauth');
// Verbose everyauth logging everywhere except production.
everyauth.debug = process.env.NODE_ENV !== 'production';
var MongoStore = require('connect-mongo')(express);
var mongoAuthDbName = 'jslogger_auth_' + (process.env.NODE_ENV || "development");
var gzippo = require('gzippo');

require('./models/db_connect');

// TODO Figure out how to include the mongooseAuth.middleware() without instantiating a User before
// NOTE(review): `User` is deliberately left global (no `var`) — confirm the
// auth middleware relies on this before tightening it.
User = require('./models/user');

// One year in milliseconds, used as the far-future cache age in production.
var oneYear = 31557600000;
var gzippoOptions = process.env.NODE_ENV === 'production' ? {clientMaxAge: oneYear, maxAge: oneYear} : {contentTypeMatch: /none/};
var connectAssetsOptions = process.env.NODE_ENV === 'production' ? {src: __dirname + '/public', minifyBuilds: true} : {src: __dirname + '/public'};
// Gzipped, far-future-cached statics in production; plain statics otherwise.
var staticRenderer = process.env.NODE_ENV === 'production' ? gzippo.staticGzip(__dirname + '/public', gzippoOptions) : express.static(__dirname + '/public');

var app = module.exports = express.createServer(
 express.bodyParser(),
 express.methodOverride(),
 express.cookieParser(),
 express.session({secret: 'jsloggersecretkey83', store: new MongoStore({db: mongoAuthDbName})}),
 require('stylus').middleware({src: __dirname + '/public'}),
 staticRenderer,
 require('connect-assets')(connectAssetsOptions),
 mongooseAuth.middleware()
);


// Configuration

app.configure(function(){
  app.set('views', __dirname + '/views');
  app.set('view engine', 'jade');
});

app.configure('development', function(){
  app.use(express.errorHandler({ dumpExceptions: true, showStack: true }));
});

app.configure('production', function(){
  app.use(express.errorHandler());
});


// Routes
require('./routes/auth')(app);
require('./routes/site')(app);
require('./routes/manage')(app);

mongooseAuth.helpExpress(app);

app.listen(process.env.PORT || 5000);
console.log("JSLogger web manager listening on port %d in %s mode", app.address().port, app.settings.env);

The code above starts JSLogger Manager on port 5000, the part of the app that is responsible for delivering static content.

Two more Node servers run on port 6987 and 6988 that are responsible for logging the entries. One logger handles the HTTP requests:

/**
 * JSLogger engine
 *
 * logger.js
 *
 * Plain-HTTP logging endpoint: accepts log entries through the logger
 * routes. Listens on port 6987 unless PORT overrides it.
 */

var express = require('express');

require('./models/db_connect');

var app = express.createServer();
module.exports = app;

// Configuration

app.configure(function () {
  app.use(express.bodyParser());
  app.use(express.methodOverride());
});

app.configure('development', function () {
  // Full stack traces while developing.
  var verboseErrors = { dumpExceptions: true, showStack: true };
  app.use(express.errorHandler(verboseErrors));
});

app.configure('production', function () {
  app.use(express.errorHandler());
});

// Routes

require('./routes/logger')(app);

var port = process.env.PORT || 6987;
app.listen(port);
console.log("JSLogger engine listening on port %d in %s mode", app.address().port, app.settings.env);

And the other logger that runs on port 6988 handles the HTTPS requests, dealing with the SSL connections:

/**
 * JSLogger engine on SSL
 *
 * logger_ssl.js
 *
 * Same logging endpoint as logger.js, but served over HTTPS on port 6988.
 */

var express = require('express'),
    fs = require('fs');

require('./models/db_connect');

// Load the certificate material up front. Passing 'utf8' makes
// readFileSync return a string directly (same as .toString()).
var sslOptions = {
  ca: fs.readFileSync('cert/sub.class1.server.ca.pem', 'utf8'),
  key: fs.readFileSync('cert/jslogger.com.key', 'utf8'),
  cert: fs.readFileSync('cert/ssl.pem', 'utf8')
};

var app = express.createServer(sslOptions);
module.exports = app;

// Configuration

app.configure(function () {
  app.use(express.bodyParser());
  app.use(express.methodOverride());
});

app.configure('development', function () {
  // Full stack traces while developing.
  var verboseErrors = { dumpExceptions: true, showStack: true };
  app.use(express.errorHandler(verboseErrors));
});

app.configure('production', function () {
  app.use(express.errorHandler());
});

// Routes

require('./routes/logger')(app);

var port = process.env.PORT || 6988;
app.listen(port);
console.log("JSLogger engine listening on port %d in %s mode", app.address().port, app.settings.env);

Creating an SSL Node server with ExpressJS is really easy as you can see in the example above.

So, three Node servers had to run on one dedicated server with MongoDB and SSL support.

Choosing a server

First, we tried to host the project on a Mediatemple dedicated server we already owned and had other sites already running on. It seemed to be great in the beginning, but after the project started to grow, the Node server was reaching the memory limit of 512MB in no time, especially when building and compiling the assets in production mode.

As JSLogger was consuming the entire server memory, slowing down the other sites running on that server as well, we had to move to another solution. More than that, the DV server on Mediatemple runs CentOS by default, a Linux distribution that we are not comfortable with. Apache is there by default as well, and I needed to get rid of it.

I’ve ended up launching an Amazon EC2 Micro instance with an Ubuntu Server 12 LTS on it for JSLogger. An EC2 Micro instance seems to be enough for JSLogger.

Installing the tools

It took me a couple of minutes to generate a script and install all the tools needed for my project on Ubuntu (kudos to Ruslan Khissamov): http://apptob.org/.

Our three Node servers were running smoothly on port 5000, 6987 and 6988.

Keeping my Node app alive with Upstart and Monit

There are a couple of good tools to monitor your Node app and keep it running but I’m used to Upstart and Monit as I worked with them before in production. There is a really great tutorial about how to install and configure them by Tim Smart.

The tricky part is to configure your Upstart and Monit configs properly. JSLogger Upstart configs look like this:

#!upstart

# /etc/init/jslogger_manager.conf
description "jslogger manager"
author      "doomhz"

start on startup
stop on shutdown

script
    export HOME="/home/ubuntu"

    # Record this job's PID so Monit can watch it (see the Monit config).
    echo $$ > /var/run/jslogger_manager.pid
    # Run the manager in production mode, appending all output to the log.
    cd /home/ubuntu/jslogger/; NODE_ENV=production /usr/local/bin/node /home/ubuntu/jslogger/app.js >> /home/ubuntu/log/upstart/jslogger_manager.sys.log 2>&1
end script

pre-start script
    # Date format same as (new Date()).toISOString() for consistency
    echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Starting" >> /home/ubuntu/log/upstart/jslogger_manager.sys.log
end script

pre-stop script
    # Remove the PID file so Monit does not restart a stopped service.
    rm /var/run/jslogger_manager.pid
    echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Stopping" >> /home/ubuntu/log/upstart/jslogger_manager.sys.log
end script

The ones for JSLogger Engine and SSL look the same.

After setting up the Upstart I can easily start and stop my app with:

sudo start jslogger_manager
sudo stop jslogger_manager

Monit will guard your app and keep it running. This is the config:

#/etc/monit/conf.d/jslogger_manager

#!monit
set logfile /home/ubuntu/log/monit/jslogger_manager.log

check process nodejs with pidfile "/var/run/jslogger_manager.pid"
    start program = "/sbin/start jslogger_manager"
    stop program  = "/sbin/stop jslogger_manager"
    if failed port 5000 protocol HTTP
        request /
        with timeout 10 seconds
        then restart

And for the Logger Engine:

#/etc/monit/conf.d/jslogger_logger

#!monit
set logfile /home/ubuntu/log/monit/jslogger_logger.log

check process nodejs_logger with pidfile "/var/run/jslogger_logger.pid"
    start program = "/sbin/start jslogger_logger"
    stop program  = "/sbin/stop jslogger_logger"
    if failed port 6987 protocol HTTP
        request /
        with timeout 10 seconds
        then restart

Set up the Monit daemon to run a check every 60 seconds:

sudo monit -d 60 -c /etc/monit/monitrc

Start and stop your apps with Monit:

sudo monit stop all
sudo monit start all

Btw, if you get an annoying error like “monit: Cannot connect to the monit daemon. Did you start it with http support?” when starting Monit, then you should check your configs in /etc/monit/monitrc and enable (uncomment) the http support:

  set httpd port 2812 and
     use address localhost  # only accept connection from localhost
     allow localhost        # allow localhost to connect to the server and
     allow admin:monit      # require user 'admin' with password 'monit'
     allow @monit           # allow users of group 'monit' to connect (rw)
     allow @users readonly  # allow users of group 'users' to connect readonly

Routing HTTP traffic with HAProxy

It would be nice to make JSLogger run on port 80. There are a couple of good tools that I’ve already worked with and can do that: Apache, Nginx. First, I’ve installed Nginx to take care of routing the traffic from port 80 to 5000, where the JSLogger Manager is running. Everything seemed to be fine except that the Websockets did not work anymore. SocketIO could not connect to my app anymore, it was always falling back to Ajax long polling.
Apparently Nginx can’t handle Websockets properly yet. There is a patch for it but it seemed to be buggy at that moment.
Luckily HAProxy exists, an amazing tool to route your traffic and that takes care of the Websockets. I followed this tutorial to install it.

Installing v 1.5 of HAProxy on Ubuntu:

# Install HAProxy
cd ~
wget http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev6.tar.gz
tar xzf haproxy-1.5-dev6.tar.gz
cd haproxy*
sudo make install

The JSLogger HAProxy config looks like this:

#/etc/haproxy/haproxy.cfg

global
        log 127.0.0.1   local0 notice
        maxconn 4096
        daemon
        #debug
        #quiet


defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        retries 3
        option redispatch
        maxconn 2000
        contimeout      5000
        clitimeout      50000
        srvtimeout      50000


frontend all 0.0.0.0:80
  timeout client 86400000
  default_backend jslogger_manager
  acl is_websocket hdr(upgrade) -i websocket
  acl is_websocket hdr_beg(host) -i ws
  redirect prefix http://jslogger.com code 301 if { hdr(host) -i www.jslogger.com } 
  
  use_backend jslogger_manager if is_websocket


backend jslogger_manager
  option forwardfor
  timeout server 86400000
  timeout connect 4000
  server nodejs 127.0.0.1:5000 weight 1 maxconn 10000 check

Test and run HAProxy:

# Test haproxy config
haproxy -c -f /etc/haproxy/haproxy.cfg

# Run HAProxy
haproxy -f /etc/haproxy/haproxy.cfg

Handling SSL with Stunnel

Next step was handling the SSL requests. Apparently HAproxy can’t do it. SSL support will be available only in the new version.

Stunnel takes care of it. I spent a couple of hours configuring Stunnel and HAProxy to work together. This was happening because I was following old tutorials. Stunnel didn’t play nicely with HAProxy before, it had to be patched to work. After spending a couple of hours patching old versions I’ve read the Stunnel manual :) They introduced a new PROXY protocol that solves the HAProxy integration.

Install Stunnel:

sudo apt-get install stunnel

Enable Stunnel (set ENABLED flag to 1):

# /etc/default/stunnel
# Julien LEMOINE 
# September 2003

# Change to one to enable stunnel automatic startup
ENABLED=1
FILES="/etc/stunnel/*.conf"
OPTIONS=""

# Change to one to enable ppp restart scripts

Configure Stunnel to route the JSLogger HTTPS traffic to HAproxy:

sslVersion = all
options = NO_SSLv2
pid = /var/run/stunnel.pid
socket = l:TCP_NODELAY=1
socket = r:TCP_NODELAY=1
output = /var/log/stunnel.log
 
[https_default]
accept       = 443
connect      = 80
cert         = /home/ubuntu/jslogger/cert/ssl.pem
key         = /home/ubuntu/jslogger/cert/jslogger.com.key

Restart Stunnel:

sudo /etc/init.d/stunnel4 restart

At this moment the entire traffic from port 443 will be redirected to port 80.

Signed SSL certificate with StartSSL

We needed a valid, signed SSL certificate to make JSLogger serve the jslogger.js script from HTTPS without being blocked by the browser’s security alerts, as it happens when using a self-signed certificate.

I wanted to buy one but @cimm advised me that we don’t have to spend money on an expensive one at the moment, StartSSL can provide a trusted certificate that works fine in all major browsers. There is enough info on their site about how to generate and sign your own certificate.

All done

That’s the entire setup and deployment process of JSLogger. The next step will be to scale it smoothly when needed. HAproxy will take care of load balancing and switching to MongoHQ can easily solve the database scaling.

Sending e-mails with NodeJS and NodeMailer through Google Mail

Sending e-mails with NodeJS is almost a breeze. Almost. First, you have to plug in the nodemailer module, then set up a transport type, load the templates, add attachments and finally send…

The first thing you tend to do is to create a wrapper class to manage all these tasks. So, I wrapped it in an Emailer class to centralize the mail sending in my app.

# /lib/emailer.coffee
#
# Centralizes outgoing mail: renders an HTML template with Underscore,
# keeps only the inline attachments the template references, and sends
# the message through Gmail SMTP via nodemailer.

emailer = require("nodemailer")
fs      = require("fs")
_       = require("underscore")

class Emailer

  # Overwritten per instance by the constructor.
  options: {}

  data: {}

  # Define attachments here; templates reference them by cid.
  attachments: [
    fileName: "logo.png"
    filePath: "./public/images/email/logo.png"
    cid: "logo@myapp"
  ]

  constructor: (@options, @data)->

  # Renders the template, collects the attachments it uses and sends the
  # mail. `callback` receives (err, result) from nodemailer.
  send: (callback)->
    html = @getHtml(@options.template, @data)
    attachments = @getAttachments(html)
    messageData =
      to: "'#{@options.to.name} #{@options.to.surname}' <#{@options.to.email}>"
      from: "'Myapp.com'"
      subject: @options.subject
      html: html
      generateTextFromHTML: true
      attachments: attachments
    transport = @getTransport()
    transport.sendMail messageData, callback

  # SMTP transport through Gmail.
  # NOTE(review): credentials are hard-coded — move them to configuration.
  getTransport: ()->
    emailer.createTransport "SMTP",
      service: "Gmail"
      auth:
        user: "myappemail@gmail.com"
        pass: "secretpass"

  # Reads /views/emails/<name>.html and interpolates {{var}} placeholders.
  getHtml: (templateName, data)->
    templatePath = "./views/emails/#{templateName}.html"
    # Fix: `encoding="utf8"` leaked an `encoding` variable as a side effect;
    # pass the encoding string directly instead.
    templateContent = fs.readFileSync(templatePath, "utf8")
    _.template templateContent, data, {interpolate: /\{\{(.+?)\}\}/g}

  # Returns only the attachments whose cid actually appears in the html.
  getAttachments: (html)->
    attachments = []
    for attachment in @attachments
      attachments.push(attachment) if html.search("cid:#{attachment.cid}") > -1
    attachments

exports = module.exports = Emailer

In a standard ExpressJS project structure you’ll store this file in /lib/emailer.coffee
You’ll need to have the email templates stored in /views/emails/ as HTML files and the attachments in /public/images/email/.

A potential email view will look like this:

<!-- invite.html -->
<html>
<head>
  <title>Invite from Myapp</title>
</head>
<body>
  <p>
    Hi {{name}} {{surname}},
  </p>
  <p>
    Myapp would like you to join it's network on <a href="http://myapp.com">Myapp.com</a>.
    <br />
    Please follow the link bellow to register:
  </p>
  <p>
    <a href="http://myapp.com/register?invite={{id}}">http://myapp.com/register?invite={{id}}</a>
  </p>
  <p>
    Thank you,
    <br />
    Myapp Team
  </p>
  <p>
    <a href="http://myapp.com"><img src="cid:logo@myapp" /></a>
  </p>
</body>
</html>

The UnderscoreJS template will take care of the variables in your template and the getAttachments() function will automatically attach the files you need by the cid from the template.

To use the class in your code you have to instantiate a new Emailer object with the desired options, the template data and send the email:

# Build the mail options. Fix: `subject` and `template` were indented under
# `to:`, but Emailer reads them as @options.subject / @options.template at
# the top level, so they belong one level up.
options =
  to:
    email: "username@domain.com"
    name: "Rick"
    surname: "Roll"
  subject: "Invite from Myapp"
  template: "invite"

# Variables interpolated into the invite template.
data =
  name: "Rick"
  surname: "Roll"  # fix: the colon was missing, making this a function call
  id: "3434_invite_id"

Emailer = require "../lib/emailer"
emailer = new Emailer options, data
emailer.send (err, result)->
  if err
    console.log err

Using a Mongoose model for the invites you would have something like this:

# Mongoose schema for an invitation e-mail sent to a prospective user.
InviteSchema = new Schema
  email:
    type: String
  name:
    type: String
  surname:
    type: String
  # Invite lifecycle; presumably flips to "accepted" on registration —
  # confirm against the registration flow.
  status:
    type: String
    enum: ["pending", "accepted"]
    default: "pending"
  # Number of times the invite link was followed.
  clicks:
    type: Number
    default: 0
  created_at:
    type: Date
    default: Date.now

# Sends this invite through the shared Emailer, passing the document itself
# as template data (exposes name/surname/id to the view).
InviteSchema.methods.send = ()->
  options =
    to:
      email: @email
      name: @name
      surname: @surname
    subject: "Invite from Myapp"
    template: "invite"
  Emailer = require "../lib/emailer"
  emailer = new Emailer options, @
  emailer.send (err, result)->
    if err
      console.log err

Invite = mongoose.model("Invite", InviteSchema)
exports = module.exports = Invite

And you’ll call it from an ExpressJS router:

Invite = require('../models/invite')

module.exports = (app)->

  # Creates an invite from the posted form data, mails it, and redirects
  # back to the invites list (303 so the browser re-GETs).
  app.post '/invites', (req, res)->
    data = req.body
    invite = new Invite data
    invite.save (err)->
      # Fix: the save error was silently ignored and the invite was mailed
      # even when persisting failed.
      if err
        console.log err
      else
        invite.send()
    res.writeHead(303, {'Location': "/invites"})
    res.end()

  # Lists all invites, newest first.
  app.get '/invites', (req, res)->
    Invite.find().desc("created_at").run (err, invites)->
      res.render 'invites/invites', {title: "Invites", invites: invites}

That’s all about it.

Your feedback is highly appreciated. Thanks.

Asynchronous Template Loading With tmpload jQuery Plugin and Backbone.js

I prefer keeping my “views” on the frontend side when building a Single Page Application. I use templates to update data on a certain portion of my interface. Using templates gives you the advantage of isolating your logical Javascript code from your presentational part. So, instead of inserting text data into the DOM directly from your View classes like this:

  $('#main').html('<p>Ugly mixed text with code.</p><div>Hello!</div>')

You load a template that you defined earlier and assigned an id to it:

<script type="text/template" id="my-template">
  <p>Ugly mixed text with code.</p>
  <div>Hello {name}!</div>
</script>

Later then, you grab the template and display it:

  $('#main').html($('#my-template').html().replace('{name}', 'Dumitru'));

You can even use a template engine like jQuery Template, Mustache or Underscore.js to pull some variables in your templates and make them more useful. I prefer working with Underscore.js that implements the John Resig’s Microtemplate engine and it plays well with Backbone.js. For that I adjust my template syntax to the Underscore API:

<script type="text/template" id="my-underscore-template">
  <p>Ugly mixed text with code.</p>
  <div>Hello <%=name %>!</div>
</script>

And load my template with the _.template function:

  var compiled = _.template($('#my-underscore-template').html());
  $('#main').html(compiled({name : 'Dumitru'}));

The technique sounds really nice – you keep the template in your DOM structure and only pick it up when you need it to load your content. The only problem is that when you have a lot of templates, your DOM structure keeps growing and growing and becomes ugly, heavy and hard to maintain. Another problem is that you keep a couple of templates in your DOM that the user probably won’t ever use (i.e. templates for editing user details). Wouldn’t it be better to keep the templates in separate files and load them only when you need them? That’s exactly what the jQuery tmpload plugin does for you.

Usually I keep my templates in separate .html files in the public path:

  /public
    /templates
      /users
        login.html
        register.html
        edit.html
      /posts
        view.html
        comment.html
        edit.html

Basically you organize your templates however suits you most. After you created your template and dropped the HTML structure inside, you can load it with tmpload and reuse it:

  $.tmpload({
    url: '/templates/posts/view.html',
    tplWrapper: _.template,
    onLoad: function (compiledTpl) {
      $('#main').html(compiledTpl({name : 'Dumitru'}));
    }
  });

In the example above I load my template from a certain URL and also pass my template engine object to it. This way my plugin will compile the template automatically and send me back the template object that I can use in my onLoad callback. Executing the code in the onLoad callback assures you that the template is there.

Another important thing to notice is that the plugin caches the template so next time when you load the same template it doesn’t do a request to the server to grab it, it keeps it into a variable $.tmpload.templates.

You can still keep your template in your DOM structure and load it with the plugin. This way you don’t even need to execute your code in a callback because you don’t have asynchronous stuff going on:

  var compiledTpl = $.tmpload({
    id: 'my-underscore-template',
    tplWrapper: _.template
  });
  $('#main').html(compiledTpl({name : 'Dumitru'}));

When the template is in the DOM, it loads instantly and gets returned by the plugin. But I recommend using it with the onLoad callback, because it’s easier to refactor it later when you switch to remote templates.

You also have the ability to set some default options for the plugin, so you don’t have to specify them later:

  // somewhere in your app bootstrap
  $.tmpload.defaults.tplWrapper = _.template;

  // then in your code you don't have to specify the template engine anymore
  var compiledTpl = $.tmpload({
    id: 'my-underscore-template'
  });
  $('#main').html(compiledTpl({name : 'Dumitru'}));

Below is an example of how I use the templates in my Backbone Views (it’s CoffeeScript code):

  # somewhere in my bootstrap.js file
  $.tmpload.defaults.tplWrapper = _.template


  # then in my View
  class UserView extends Backbone.View
    tplUrl: '/templates/posts/view.html'

    render: ()->
      $el = $(@el)
      userModel = @model
      $.tmpload
        url: @tplUrl
        onLoad: (compiledTpl)->
          htmlData = compiledTpl({user: userModel})
          $el.html(htmlData)


  # after this I instantiate the view in my router
  userView = new UserView()
  userView.render()

If you want to load some templates from the DOM, you can tweak your view like this:

  # then in my View
  class UserView extends Backbone.View
    tplId: 'my-underscore-template'

    render: ()->
      $el = $(@el)
      compiledTpl = $.tmpload
        id: @tplId
      htmlData = compiledTpl({user: @model})
      $el.html(htmlData)

tmpload is a powerful plugin to use along with your SPA. I hope you’ll enjoy it.

Impressions From Full Frontal Javascript Conference 2011

Differently from other Javascript conferences, Full Frontal is supposed to be a one day conference that everyone can afford to go to. It happened in Brighton, a beautiful, small city in the South of the UK.

Remy Sharp is the one who managed the big crowd of over 300 geeks that met that Friday. He received a lot of help from his wife @Julieanne who supports his great geeky ideas.

The conference opened its doors at 9:30 in the morning which was a bit harsh for me, dealing with the previous night’s hangover. Lucky for me I stayed in a nearby Travelodge hotel that was a 5 minutes’ distance from the venue.

There was a big crowd at Duke Of York’s already when I arrived at the registration. Everyone was happy to meet familiar faces once again and to share stories about their projects, weird code, ninjas and Web.

In a couple of minutes we proceeded to the main room where all the talks were held. It was a big cinema room with comfortable chairs which everyone was really happy about :)

Remy opened the conference, explaining what everyone should do and not do and also announced the speakers. He also brought the sad news that there is no internet at the conference :(

The first speaker was Jeremy Ashkenas, the father of CoffeeScript and Backbone.js. As a new version of CoffeeScript was coming soon, he presented the new goodies that this amazing language is providing. I was happy to see that a lot of people in the room were already using it (including me :p ). His conclusion was that languages that compile to Javascript make your life a lot easier as a developer and you should not be afraid to use them.

After a 15 minutes break, Phil Hawksworth told us about Excessive Enhancement and how we can get overloaded today with all the fancy tools around us on the Web and use them wrongly.

Marijn Haverbeke, an awesome guy from Amsterdam, presented his well-known project – CodeMirror. He is also the author of the Eloquent Javascript book – a very good one. The unique part of CodeMirror IDE is that it has an API that you can use everywhere. You can grab the slides from here.

After a break, Nicholas Zakas, who worked at Yahoo! and has a couple of books on Javascript, told us how to structure a Scalable application with Javascript. After his talk I started to have some doubts about my previous frontend apps architecture. :)

Around 14:00 all the geeks went to eat. We found some crappy food with average, expensive English beer in a pub nearby. But the point was to meet new people and discover more about their Web toys, right?…

Rik Arends, the CTO of Cloud9 IDE, shared his knowledge about the massive architecture of the IDE. His advice on how to scale Node apps and how they deal with a massive amount of requests and data were extremely welcome. Cloud9IDE is growing fast and I was surprised to see that a lot of people in the room were already using it (including me :p ).

Glenn Jones presented his amazing projects and how to play with drag and drop between browsers (even IE). Browsers are not totally prepared yet for exchanging data on drag, but it’s worth to give it a try.

Brendan Dawes is a creative director that came to the dev conference to open our eyes and pay attention to things around us, beyond our geeky, cyber life. He explains how weird and unordered things inspire and challenge him.

An amazing talk by Marcin Wichary closed the conference. He is the guy who stands behind the Google Doodles development. It’s very challenging to make those little scripts that should run fine in all browsers and load very fast. Usually there is no elegant solution to do it, so you should do what you gotta do – use hacks to fight the browsers. The most popular Doodle is Pac-Man.

After the conference everyone went to a party where there was a lot of alcohol, with the aim of sharing the impressions about Full Frontal 2011 and, once again, the stories about their projects, weird code, ninjas and Web…

Ah, yes, and here is a picture of the famous Brighton Pier:

Brighton Pier

Brighton Pier

Test parent function calls with Jasmine for Backbone.js and write getters with CoffeeScript

I spent about half an hour figuring out how to test a parent function call with Jasmine on code written with CoffeeScript. And, as usual, it was a stupid mistake.

So, my intention was to rewrite the “get” method of a Backbone Model class so that it returns a custom attribute. The code in CoffeeScript looks like this:

# Backbone model exposing a virtual 'fullname' attribute through get().
class Person extends Backbone.Model
  # Fix: Backbone expects `defaults` (plural); `default:` was silently ignored,
  # so name/surname never received their null defaults.
  defaults:
    name:    null
    surname: null

  # Intercept reads of the virtual 'fullname' attribute; every other
  # attribute is delegated to Backbone.Model's own get via super.
  get: (attribute)->
    switch attribute
      when 'fullname' then @getFullname()
      else super attribute

  # Glues the stored name and surname together.
  getFullname: ()->
    "#{@get('name')} #{@get('surname')}"

The model’s core “get” is rewritten here and checks the attribute calls. If the attribute equals “fullname” – the code will return the result of the “getFullname” method of the class. This method will glue the name and surname of the person that are stored as object’s attributes. In all other cases the parent “get” function will be called.

The specs for this code is pretty simple. The ambiguous thing is to spec the CoffeeScript “super” call on “get” method. In CoffeeScript the “super” keyword accesses the prototype of the class. The easiest way to test it is like this:

# Jasmine specs for Person. The trailing `...` is an article placeholder,
# not valid CoffeeScript.
describe "Person", ()->
  person = undefined
  
  beforeEach ()->
    person = new window.Person()

  describe "get", ()->
    describe "when the full name is requested", ()->
      it "gets the full name", ()->
        # The spy replaces getFullname; we only assert the delegation happens.
        spyOn(person, 'getFullname')
        person.get('fullname')
        expect(person.getFullname).toHaveBeenCalled()

    describe "when another attribute is requested", ()->
      it "calls super", ()->
        # CoffeeScript's `super` compiles to a prototype call, so spying on
        # Backbone.Model.prototype.get catches the delegation.
        spyOn(window.Backbone.Model.prototype, 'get')
        person.get('fakeAttribute')
        expect(window.Backbone.Model.prototype.get)
        .toHaveBeenCalledWith('fakeAttribute')

  describe "getFullname", ()->
    ...

In the second describe block we check if the call arrives at the parent class. That’s a pretty easy but sometimes confusing thing to do. The confusion comes from CoffeeScript that makes you forget that the “super” keyword translates to a prototype call :)

Switch to our mobile site