Merge branch 'master' into feature/nanoc-v4

Jez Cope 2016-03-29 15:12:03 +01:00
commit e693a03ee0
16 changed files with 368 additions and 81 deletions

View File

@ -1 +1 @@
ruby-2.2.1
ruby-2.2.4

View File

@ -1,6 +1,6 @@
source 'https://rubygems.org/'
gem "nanoc", "~> 4.0.0b3"
gem "nanoc"
gem "rack"
gem "adsf"
gem "mime-types"
@ -20,6 +20,7 @@ gem "faker"
gem "therubyracer"
gem "systemu"
gem "rb-readline"
gem "fog"
gem "unf"

View File

@ -1,29 +1,28 @@
GEM
remote: https://rubygems.org/
specs:
CFPropertyList (2.3.1)
activesupport (4.2.4)
CFPropertyList (2.3.2)
activesupport (4.2.6)
i18n (~> 0.7)
json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
adsf (1.2.0)
adsf (1.2.1)
rack (>= 1.0.0)
andand (1.3.3)
autoprefixer-rails (6.0.2)
autoprefixer-rails (6.3.5)
execjs
json
bootstrap-sass (3.3.5.1)
autoprefixer-rails (>= 5.0.0.1)
sass (>= 3.3.0)
bootstrap-sass (3.3.6)
autoprefixer-rails (>= 5.2.1)
sass (>= 3.3.4)
builder (3.2.2)
chunky_png (1.3.4)
coderay (1.1.0)
chunky_png (1.3.5)
coderay (1.1.1)
coffee-script (2.4.1)
coffee-script-source
execjs
coffee-script-source (1.9.1.1)
coffee-script-source (1.10.0)
colored (1.2)
compass (1.0.3)
chunky_png (~> 1.2)
@ -39,25 +38,29 @@ GEM
sass (>= 3.2, < 3.5)
cri (2.7.0)
colored (~> 1.2)
excon (0.45.4)
excon (0.49.0)
execjs (2.6.0)
faker (1.5.0)
faker (1.6.3)
i18n (~> 0.5)
ffi (1.9.10)
fission (0.5.0)
CFPropertyList (~> 2.2)
fog (1.34.0)
fog (1.38.0)
fog-aliyun (>= 0.1.0)
fog-atmos
fog-aws (>= 0.6.0)
fog-brightbox (~> 0.4)
fog-cloudatcost (~> 0.1.0)
fog-core (~> 1.32)
fog-dynect (~> 0.0.2)
fog-ecloud (~> 0.1)
fog-google (>= 0.0.2)
fog-google (<= 0.1.0)
fog-json
fog-local
fog-openstack
fog-powerdns (>= 0.1.1)
fog-profitbricks
fog-rackspace
fog-radosgw (>= 0.0.2)
fog-riakcs
fog-sakuracloud (>= 0.0.4)
@ -67,44 +70,57 @@ GEM
fog-terremark
fog-vmfusion
fog-voxel
fog-vsphere (>= 0.4.0)
fog-xenserver
fog-xml (~> 0.1.1)
ipaddress (~> 0.5)
nokogiri (~> 1.5, >= 1.5.11)
fog-aliyun (0.1.0)
fog-core (~> 1.27)
fog-json (~> 1.0)
ipaddress (~> 0.8)
xml-simple (~> 1.1)
fog-atmos (0.1.0)
fog-core
fog-xml
fog-aws (0.7.6)
fog-aws (0.9.2)
fog-core (~> 1.27)
fog-json (~> 1.0)
fog-xml (~> 0.1)
ipaddress (~> 0.8)
fog-brightbox (0.9.0)
fog-brightbox (0.10.1)
fog-core (~> 1.22)
fog-json
inflecto (~> 0.0.2)
fog-core (1.32.1)
fog-cloudatcost (0.1.2)
fog-core (~> 1.36)
fog-json (~> 1.0)
fog-xml (~> 0.1)
ipaddress (~> 0.8)
fog-core (1.36.0)
builder
excon (~> 0.45)
formatador (~> 0.2)
mime-types
net-scp (~> 1.1)
net-ssh (>= 2.1.3)
fog-dynect (0.0.2)
fog-dynect (0.0.3)
fog-core
fog-json
fog-xml
fog-ecloud (0.3.0)
fog-core
fog-xml
fog-google (0.0.7)
fog-google (0.1.0)
fog-core
fog-json
fog-xml
fog-json (1.0.2)
fog-core (~> 1.0)
multi_json (~> 1.10)
fog-local (0.2.1)
fog-local (0.3.0)
fog-core (~> 1.27)
fog-openstack (0.1.2)
fog-core (>= 1.35)
fog-json (>= 1.0)
fog-xml (>= 0.1)
ipaddress (>= 0.8)
fog-powerdns (0.1.1)
fog-core (~> 1.27)
fog-json (~> 1.0)
@ -113,7 +129,12 @@ GEM
fog-core
fog-xml
nokogiri
fog-radosgw (0.0.4)
fog-rackspace (0.1.1)
fog-core (>= 1.35)
fog-json (>= 1.0)
fog-xml (>= 0.1)
ipaddress (>= 0.8)
fog-radosgw (0.0.5)
fog-core (>= 1.21.0)
fog-json
fog-xml (>= 0.0.1)
@ -121,13 +142,13 @@ GEM
fog-core
fog-json
fog-xml
fog-sakuracloud (1.1.0)
fog-sakuracloud (1.7.5)
fog-core
fog-json
fog-serverlove (0.1.2)
fog-core
fog-json
fog-softlayer (0.4.7)
fog-softlayer (1.1.0)
fog-core
fog-json
fog-storm_on_demand (0.1.1)
@ -142,6 +163,12 @@ GEM
fog-voxel (0.1.0)
fog-core
fog-xml
fog-vsphere (0.6.3)
fog-core
rbvmomi (~> 1.8)
fog-xenserver (0.2.3)
fog-core
fog-xml
fog-xml (0.1.2)
fog-core
nokogiri (~> 1.5, >= 1.5.11)
@ -155,50 +182,56 @@ GEM
pry (>= 0.9.12)
shellany (~> 0.0)
thor (>= 0.18.1)
guard-nanoc (1.0.2)
guard (>= 1.8.0)
nanoc (>= 3.6.3)
guard-compat (1.2.1)
guard-nanoc (2.0.0)
guard (~> 2.8)
guard-compat (~> 1.0)
nanoc (~> 4.0)
haml (4.0.7)
tilt
i18n (0.7.0)
inflecto (0.0.2)
ipaddress (0.8.0)
ipaddress (0.8.3)
json (1.8.3)
kramdown (1.8.0)
libv8 (3.16.14.11)
listen (3.0.3)
kramdown (1.10.0)
libv8 (3.16.14.13)
listen (3.0.6)
rb-fsevent (>= 0.9.3)
rb-inotify (>= 0.9)
lumberjack (1.0.9)
rb-inotify (>= 0.9.7)
lumberjack (1.0.10)
method_source (0.8.2)
mime-types (2.6.1)
mini_portile (0.6.2)
minitest (5.8.0)
mime-types (3.0)
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0221)
mini_portile2 (2.0.0)
minitest (5.8.4)
multi_json (1.11.2)
nanoc (4.0.0rc2)
nanoc (4.1.5)
cri (~> 2.3)
nenv (0.2.0)
net-scp (1.2.1)
net-ssh (>= 2.6.5)
net-ssh (2.9.2)
nokogiri (1.6.6.2)
mini_portile (~> 0.6.0)
notiffany (0.0.7)
nenv (0.3.0)
nokogiri (1.6.7.2)
mini_portile2 (~> 2.0.0.rc2)
notiffany (0.0.8)
nenv (~> 0.1)
shellany (~> 0.0)
pry (0.10.1)
pry (0.10.3)
coderay (~> 1.1.0)
method_source (~> 0.8.1)
slop (~> 3.4)
rack (1.6.4)
rack-rewrite (1.5.1)
rb-fsevent (0.9.6)
rb-inotify (0.9.5)
rb-fsevent (0.9.7)
rb-inotify (0.9.7)
ffi (>= 0.5.0)
rb-readline (0.5.3)
rbvmomi (1.8.2)
builder
nokogiri (>= 1.4.1)
trollop
ref (2.0.0)
rouge (1.9.1)
sass (3.4.18)
sequel (4.26.0)
rouge (1.10.1)
sass (3.4.22)
sequel (4.32.0)
shellany (0.0.1)
slop (3.6.0)
systemu (2.6.5)
@ -207,15 +240,16 @@ GEM
ref
thor (0.19.1)
thread_safe (0.3.5)
tilt (2.0.1)
tilt (2.0.2)
trollop (2.1.2)
tzinfo (1.2.2)
thread_safe (~> 0.1)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
uglifier (3.0.0)
execjs (>= 0.3.0, < 3)
unf (0.1.4)
unf_ext
unf_ext (0.0.7.1)
unf_ext (0.0.7.2)
xml-simple (1.1.5)
PLATFORMS
ruby
@ -234,10 +268,11 @@ DEPENDENCIES
haml
kramdown
mime-types
nanoc (~> 4.0.0b3)
nanoc
nokogiri
rack
rack-rewrite
rb-readline
rouge
sequel
systemu
@ -245,3 +280,6 @@ DEPENDENCIES
thor
uglifier
unf
BUNDLED WITH
1.11.2

Rules
View File

@ -66,7 +66,7 @@ compile '/articles/*', rep: :archive do
end

compile %r{^(/feeds?/.*|/sitemap/|/ignore-me/)$} do
  filter :haml
  filter :haml, format: :xhtml
end

compile %r{^/(tag|category)/} do
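
(Context for the `format: :xhtml` change above: Haml's default `:html5` format writes void elements like `link` without a closing slash, which is fine for HTML but not well-formed XML, so strict Atom parsers can reject the feed. A standalone illustration, assuming Haml 4:)

```ruby
require 'haml'

src = "%link{rel: :self, href: 'http://erambler.co.uk/feed.xml'}"

# :html5 emits a bare void element: fine in HTML, invalid in an XML feed
puts Haml::Engine.new(src, format: :html5).render
# => <link href='http://erambler.co.uk/feed.xml' rel='self'>

# :xhtml self-closes it, keeping the Atom document well-formed
puts Haml::Engine.new(src, format: :xhtml).render
# => <link href='http://erambler.co.uk/feed.xml' rel='self' />
```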

View File

@ -0,0 +1,56 @@
---
title: "Free kanban tool face-off: Trello vs LeanKit"
kind: article
created_at: Thu 10 Sep 2015 17:39:38 BST
tags:
- Kanban
- Productivity
- Trello
- LeanKit
---
I like the [personal kanban](/blog/kanban-gtd/) way of working. It satisfies my need to make lists and track everything in one place, while being flexible enough to evolve and adapt with minimal friction, and I like the feeling it gives of tasks flowing through my workflow. I also prefer digital tools in general, because (battery life permitting) I can generally use them wherever I am.
For online kanban-ing I really like [Trello](http://trello.com/), but recently I've been trying out another product, [LeanKit](http://www.leankit.com/), so I wanted to note down my thoughts on how the two measure up.
## Trello
Trello is fairly simple in concept, though probably inspired by the ideas of kanban. The overall structure is that you create cards, arrange them vertically into columns ("lists") and group the lists into boards. That's all the structure there is, but you can have any number of boards, lists and cards.
- What I like:
- Create cards via email (especially when combined with automation tool [IFTTT](http://ifttt.com))
- Smooth interface with a sense of fun and amazing mobile apps
- Can have lots of boards with different backgrounds
- Flexible sharing features for individuals and organisations
- Keyboard shortcuts for many features
- What I don't like:
- Inflexibility of structure (partly overcome with multiple boards)
- The lists all look the same so it's hard to orient yourself when quickly glancing at one
## LeanKit
Where Trello is all about simplicity, LeanKit is more about power. The overarching concept is similar: you're still arranging cards in columns ("lanes" in LeanKit). The key difference is that LeanKit has much more flexibility in how you arrange your lanes: you can split them vertically or horizontally as many times as you like, allowing much more hierarchical grouping structures.
- What I like:
- Very flexible: you can freely split lanes vertically & horizontally to create your desired structure
- It hides away old cards in a fully searchable archive
- Bulk editing/moving of cards
- Some premium features (e.g. sub-boards within cards, analytics)
- What I don't like:
- The best features are paid-only:
- Sharing boards
- Moving stuff between boards
- More than 3 boards
- The interface feels stuck in the mid-2000s
- Poor mobile support: only third-party apps are available and their support of some features is limited
- Possibly *too* flexible: it tends to lead me down process-tweaking rabbit-holes when I should be getting things done
## What am I doing now?
LeanKit was an interesting experiment, and I think it has a lot of value for those who need the more advanced features and are prepared to pay. At the end of the day, though, I'm not one of those people, so I've moved back to Trello.
I have, though, learned a lot about flexible use of boards from my experience with LeanKit, and I'm experimenting a lot more with how I use them now that I'm back in Trello. For example, I'm increasingly creating separate boards for particular *types* of task (e.g. people I want to meet) and for larger projects.
In summary: if you can justify paying the cash and don't mind the clunkiness, try LeanKit, but otherwise, just use Trello!
I'd be interested to know which company is the more profitable: does LeanKit's focus on big enterprise customers pay off or hold them back by putting off individuals?

View File

@ -0,0 +1,25 @@
---
title: "#IDCC16 Day 0: business models for research data management"
kind: article
created_at: Mon 22 Feb 2016 18:20:55 CET
tags:
- IDCC16
- Research data management
- Conference
- Service planning
---
I'm at the [International Digital Curation Conference 2016][IDCC16] (#IDCC16) in Amsterdam this week. It's always a good opportunity to pick up some new ideas and catch up with colleagues from around the world, and I always come back full of new possibilities. I'll try and do some more reflective posts after the conference but I thought I'd do some quick reactions while everything is still fresh.
Monday and Thursday are pre- and post-conference workshop days, and today I attended [*Developing Research Data Management Services*][workshop]. Joy Davidson and Jonathan Rans from the [Digital Curation Centre (DCC)][] introduced us to the [Business Model Canvas][BMC], a template for designing a business model on a single sheet of paper. The model prompts you to think about all of the key facets of a sustainable, profitable business, and can easily be adapted to the task of building a service model within a larger institution. The DCC used it as part of the [Collaboration to Clarify Curation Costs (4C) project][4C], whose output, the [Curation Costs Exchange][CCEx], is also worth a look.
It was a really useful exercise to be able to work through the whole process for an aspect of research data management (my table focused on training & guidance provision), both for the ideas that came up and for the experience of putting the framework into practice. It seems like a really valuable tool, and I look forward to seeing how it might help us with our RDM service development.
Tomorrow the conference proper begins, with a range of keynotes, panel sessions and birds-of-a-feather meetings so hopefully more then!
[IDCC16]: http://www.dcc.ac.uk/events/idcc16
[workshop]: http://www.dcc.ac.uk/events/idcc16/workshops#Workshop%201
[Digital Curation Centre (DCC)]: http://www.dcc.ac.uk/
[BMC]: http://www.businessmodelgeneration.com/canvas/bmc
[4C]: http://www.curationexchange.org/about#4cproject
[CCEx]: http://www.curationexchange.org/

View File

@ -0,0 +1,34 @@
---
title: "#IDCC16 Day 1: Open Data"
kind: article
created_at: Tue 23 Feb 2016 19:43:57 CET
tags:
- IDCC16
- Research data management
- Conference
- Open data
---
The main conference opened today with an inspiring keynote by Barend Mons, Professor in Biosemantics, Leiden University Medical Center. The talk had plenty of great stuff, but two points stood out for me.
First, Prof Mons described a newly discovered link between Huntington's Disease and a previously unconsidered gene. No-one had previously recognised this link, but on mining the literature, an indirect link was identified in more than 10% of the roughly 1 million scientific claims analysed. This is knowledge for which we already had more than enough evidence, but **which could never have been discovered without such a wide-ranging computational study**.
Second, he described a number of behaviours which **should be considered "malpractice" in science**:
- Relying on supplementary data in articles for data sharing: the majority of this is trash (paywalled, embedded in bitmap images, missing)
- Using the Journal Impact Factor to evaluate science and ignoring altmetrics
- Not writing data stewardship plans for projects (he prefers this term to "data management plan")
- Obstructing tenure for data experts by assuming that all highly-skilled scientists must have a long publication record
A second plenary talk from Andrew Sallans of the [Center for Open Science](http://cos.io) introduced a number of interesting-looking bits and bobs, including the [Transparency & Openness Promotion (TOP) Guidelines][TOP], which set out a pathway to help funders, publishers and institutions move towards more open science.
[TOP]: https://osf.io/9f6gx/wiki/Guidelines/
The rest of the day was taken up with a panel on open data, a poster session, some demos and a birds-of-a-feather session on sharing sensitive/confidential data. There was a great range of posters, but a few that stood out to me were:
- Lessons learned about ISO 16363 ("Audit and certification of trustworthy digital repositories") certification from the British Library
- Two separate posters (from the Universities of Toronto and Colorado) about disciplinary RDM information & training for liaison librarians
- A template for sharing psychology data developed by a psychologist-turned-information researcher from Carnegie Mellon University
More to follow, but for now it's time for the conference dinner!

View File

@ -0,0 +1,53 @@
---
title: '#IDCC16 day 2: new ideas'
teaser: 'Lots of new ideas from #IDCC16 day 2!'
kind: article
created_at: Wednesday 16 Mar 2016 07:44:14
tags:
- IDCC16
- Conference
- Open Data
- Research Data Management
---
*Well, I did a great job of blogging the conference for a couple of days, but then I was hit by the bug that's been going round and didn't have a lot of energy for anything other than paying attention and making notes during the day! I've now got round to reviewing my notes so here are a few reflections on day 2.*
Day 2 was the day of many parallel talks! So many great and inspiring ideas to take in! Here are a few of my take-home points.
## Big science and the long tail ##
The first parallel session had examples of practical data management in the real world. Jian Qin & Brian Dobreski (School of Information Studies, Syracuse University) worked on reproducibility with one of the research groups involved with the recent gravitational wave discovery. "Reproducibility" for this work (as with much of physics) mostly equates to computational reproducibility: tracking the provenance of the code and its input and output is key. They also found that in practice the scientists' focus was on making the big discovery, and ensuring reproducibility was seen as secondary. This goes some way to explaining why current workflows and tools don't really capture enough metadata.
Milena Golshan & Ashley Sands (Center for Knowledge Infrastructures, UCLA) investigated the use of Software-as-a-Service (SaaS, such as Google Drive, Dropbox or more specialised tools) as a way of meeting the needs of long-tail science research such as ocean science. This research is characterised by small teams, diverse data, dynamic local development of tools, local practices and difficulty disseminating data. This results in a need for researchers to be generalists, unlike their counterparts in "big science" areas, who can afford to specialise much more deeply. Such generalists tend to develop their own isolated workflows, which can differ greatly even within a single lab. Long-tail research also often suffers from a lack of dedicated IT support. They found that use of SaaS could help to meet these challenges, though at a high cost to provide the necessary guarantees of security and stability.
## Education & training ##
This session focussed on the professional development of library staff. Eleanor Mattern (University of Pittsburgh) described the immersive training introduced to improve librarians' understanding of the data needs of their subject areas, part of the University's [RDM service delivery model][UPitt model]. The participants each conducted a "disciplinary deep dive", shadowing researchers and then reporting back to the group on their discoveries with a presentation and discussion.
Liz Lyon (also University of Pittsburgh, formerly UKOLN/DCC) gave a systematic breakdown of the skills, knowledge and experience required in different data-related roles, obtained from an analysis of job adverts. She identified distinct roles of data analyst, data engineer and data journalist, and as well as each role's distinctive skills, pinpointed common requirements of all three: Python, R, SQL and Excel. This work follows on from an earlier phase which identified an allied set of roles: data archivist, data librarian and data steward.
[UPitt model]: http://d-scholarship.pitt.edu/26738/
## Data sharing and reuse ##
This session gave an overview of several specific workflow tools designed for researchers. Marisa Strong (University of California Curation Center/California Digital Library) presented *[Dash](https://dash.cdlib.org/)*, a highly modular tool for manual data curation and deposit by researchers. It's built on their flexible backend, *Stash*, and though it's currently optimised to deposit in their Merritt data repository, it could easily be hooked up to other repositories. It captures DataCite metadata and a few other fields, and is integrated with ORCID to uniquely identify people.
In a different vein, Eleni Castro (Institute for Quantitative Social Science, Harvard University) discussed some of the ways that [Harvard's Dataverse](http://dataverse.org/) repository is streamlining deposit by enabling automation. It provides a number of standardised endpoints such as [OAI-PMH](https://www.openarchives.org/pmh/) for metadata harvest and [SWORD](http://swordapp.org/) for deposit, as well as custom APIs for discovery and deposit. Interesting use cases include:
- An addon for the [Open Science Framework](https://osf.io/) to deposit in Dataverse via SWORD
- An [R package](https://cran.r-project.org/web/packages/dvn/README.html) to enable automatic deposit of simulation and analysis results
- Integration with publisher workflows such as Open Journal Systems
- A growing set of visualisations for deposited data
In the future they're also looking to integrate with [DMPtool](https://dmptool.org/) to capture data management plans and with Archivematica for digital preservation.
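To give a flavour of how simple those standard endpoints make things, here's a minimal sketch of harvesting metadata over OAI-PMH (the `/oai` path on the demo server is an assumption; check the docs for a real installation):

```ruby
require 'net/http'
require 'uri'

# OAI-PMH is plain HTTP GET with a couple of query parameters
uri = URI('https://demo.dataverse.org/oai')
uri.query = URI.encode_www_form(verb: 'ListRecords', metadataPrefix: 'oai_dc')

puts Net::HTTP.get(uri) # => XML containing Dublin Core records
```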
Andrew Treloar ([Australian National Data Service](http://ands.org.au/)) gave us some reflections on the ANDS "applications programme", a series of 25 small funded projects intended to address the fourth of their strategic transformations, *single use* → *reusable*. He observed that essentially these projects worked because they were able to throw money at a problem until they found a solution: not very sustainable. Some of them stuck to a [traditional "waterfall" approach to project management](https://en.m.wikipedia.org/wiki/Waterfall_model), resulting in "the right solution 2 years late". Every researcher's needs are "special" and communities are still constrained by old ways of working. The conclusions from this programme were that:
- "Good enough" is fine most of the time
- Adopt/Adapt/Augment is better than Build
- Existing toolkits let you focus on the 10% functionality that's missing
- Successful projects involved research champions who can: 1) articulate their community's requirements; and 2) promote project outcomes
## Summary ##
All in all, it was a really exciting conference, and I've come home with loads of new ideas and plans to develop our services at Sheffield. I noticed a continuation of some of the trends I spotted at last year's IDCC, especially an increasing focus on "second-order" problems: we're no longer spending most of our energy just convincing researchers to take data management seriously and are able to spend more time helping them to do it *better* and get value out of it. There's also a shift in emphasis (identified by closing speaker Cliff Lynch) from sharing to reuse, and making sure that data is not just available but valuable.

View File

@ -0,0 +1,11 @@
---
title: "Testing automatic posting to Twitter"
teaser: "If it all works correctly, this should be autoposted to Twitter"
kind: article
created_at: Tue 29 Mar 2016 14:39:12 BST
tags:
- Meta
- Test
---
I've been experimenting with moving away from IFTTT for syndicating my content to other platforms. More on that later, but for now this is a test post to make sure it's all working. 😎
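For the curious, the kind of thing I'm testing looks roughly like the sketch below, using the [twitter gem](https://github.com/sferik/twitter). This is illustrative rather than my actual setup; the credentials come from environment variables you'd set yourself.

```ruby
require 'twitter'

client = Twitter::REST::Client.new do |config|
  config.consumer_key        = ENV['TWITTER_CONSUMER_KEY']
  config.consumer_secret     = ENV['TWITTER_CONSUMER_SECRET']
  config.access_token        = ENV['TWITTER_ACCESS_TOKEN']
  config.access_token_secret = ENV['TWITTER_ACCESS_TOKEN_SECRET']
end

# Announce a post; a real syndication script would track which
# posts have already been tweeted and skip them
client.update('Testing automatic posting to Twitter http://erambler.co.uk/')
```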

View File

@ -2,4 +2,36 @@
feed_url: http://feeds.feedburner.com/erambler
is_hidden: true
---
= fix_atom_link_type atom_feed
!!! XML UTF-8
%feed{'xmlns': 'http://www.w3.org/2005/Atom',
      'xmlns:prism': 'http://prismstandard.org/namespaces/basic/2.1/'}
  - articles_sorted = articles.sort_by{|a| attribute_to_time(a[:updated_at] || a[:created_at]).to_iso8601_time}.reverse
  %id= @config[:base_url] + '/'
  %title= @config[:title]
  %updated= attribute_to_time(articles_sorted.first[:updated_at] || articles_sorted.first[:created_at]).to_iso8601_time
  %link{rel: :alternate, href: @config[:base_url] + '/'}
  %link{rel: :self, href: @config[:feed_url]}
  %link{rel: :license, type: 'application/rdf+xml', href: @config[:license_url] + 'rdf'}
  %author
    %name= @config[:author_name]
    %uri= @config[:author_uri]
  - articles_sorted.take(5).each do |a|
    %entry
      %id= atom_tag_for a
      %title{type: 'html'}= a[:title]
      - if a[:teaser] then
        %prism:teaser= a[:teaser]
      %published= attribute_to_time(a[:created_at]).to_iso8601_time
      %updated= attribute_to_time(a[:updated_at] || a[:created_at]).to_iso8601_time
      %link{rel: :alternate,
            href: url_for(a),
            type: 'text/html'}
      %content{type: :html}
        &= a.compiled_content(snapshot: :pre)
      -# %div{xmlns: 'http://www.w3.org/1999/xhtml'}= a.compiled_content(snapshot: :pre)
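
(A note on the helpers this template relies on: `attribute_to_time` and `to_iso8601_time` come from nanoc's Blogging helper, and `atom_tag_for` generates an RFC 4151 `tag:` URI to serve as each entry's globally unique ID. A rough sketch of what that last helper does; not nanoc's exact implementation:)

```ruby
# Builds a stable entry ID such as
# "tag:erambler.co.uk,2016-03-29:/blog/some-post/"
def atom_tag_for(item)
  date = attribute_to_time(item[:created_at]).strftime('%Y-%m-%d')
  "tag:#{@config[:hostname]},#{date}:#{item.path}"
end
```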

View File

@ -14,14 +14,14 @@
%section
  #content
    - if %w{single-post static-page}.include? @item[:page_type]
      %article
      %article.h-entry
        .row
          %h1.post-title= @item[:title]
          %h1.post-title.p-name= @item[:title]
        .row
          - if @item[:page_type] == 'single-post'
            .post-info= render 'partials/post_info', :item => @item
          .post-body
            .post-content= yield
            .post-content.e-content= yield
        - if @item[:page_type] == 'single-post'
          #disqus_thread
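
(The `h-entry`, `p-name` and `e-content` classes added above are [microformats2](http://microformats.org/wiki/h-entry) markup, letting feed readers and IndieWeb tools parse posts straight out of the HTML. A quick sketch of consuming them with nokogiri, which is already in the Gemfile; the URL is illustrative:)

```ruby
require 'nokogiri'
require 'open-uri'

# Pull the microformats2 fields out of a rendered post
doc   = Nokogiri::HTML(open('http://erambler.co.uk/blog/kanban-gtd/'))
entry = doc.at_css('.h-entry')

puts entry.at_css('.p-name').text    # the post title
puts entry.at_css('.e-content').text # the post body
```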

View File

@ -1,10 +1,10 @@
%a.license(rel="license" href="http://creativecommons.org/licenses/by-sa/2.0/uk/")
  %img(alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/2.0/uk/88x31.png")
%a.license{rel: :license, href: @config[:license_url]}
  %img(alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/4.0/88x31.png")
%span(xmlns:dct="http://purl.org/dc/terms/" href="http://purl.org/dc/dcmitype/Text" property="dct:title" rel="dct:type")
  = @config[:title]
  by
  %a(xmlns:cc="http://creativecommons.org/ns#" href="#{@config[:author_uri]}" property="cc:attributionName" rel="cc:attributionURL")
    = @config[:author_name]
  is licensed under a
  %a(rel="license" href="http://creativecommons.org/licenses/by-sa/2.0/uk/")
    Creative Commons Attribution-ShareAlike 2.0 UK: England & Wales License
  %a{rel: :license, href: @config[:license_url]}
    Creative Commons Attribution-ShareAlike 4.0 International license

View File

@ -1,11 +1,11 @@
- item_date = date_for @item
- all_tags = tags_for @item

.post-date
.post-date.dt-published
  = item_date.strftime '%A %e %B %Y'
  %a.u-url{:href => url_for(@item)}= item_date.strftime '%A %e %B %Y'
- if all_tags.length > 0
  Tagged with
  %ul.post-tags
    - all_tags.each do |x|
      %li= link_for_tag x
      %li.p-category= link_for_tag x

View File

@ -1,8 +1,9 @@
.sidebar-box.about-me
.sidebar-box.about-me.h-card
  -# %h2 About me
  :kramdown
    Hi, I'm Jez. Welcome to my blog, where I talk about technology in
    research and higher education, including:
    Hi, I'm [Jez Cope](http://erambler.co.uk){:.p-name.u-url} and this is my
    blog, where I talk about technology in research and higher
    education, including:

    - Research data management;
    - e-Research;
@ -18,9 +19,9 @@
.sidebar-box.links
  %h2 Me elsewhere
  :kramdown
    - [Twitter](http://twitter.com/jezcope)
    - [github](https://github.com/jezcope)
    - [LinkedIn](http://linkedin.com/in/jezcope)
    - [Twitter](https://twitter.com/jezcope){:rel='me'}
    - [github](https://github.com/jezcope){:rel='me'}
    - [LinkedIn](https://linkedin.com/in/jezcope)
    - [Diigo](http://diigo.com/user/jezcope)
    - [Zotero](https://www.zotero.org/jezcope)
    - [Google+](http://gplus.to/jezcope)

View File

@ -5,7 +5,8 @@ author_uri: http://erambler.co.uk/
hostname: erambler.co.uk
base_url: http://erambler.co.uk
feed_url: http://feeds.feedburner.com/erambler
feed_url: http://erambler.co.uk/feed.xml
license_url: http://creativecommons.org/licenses/by-sa/4.0/
ga_account: UA-10201101-1

wercker.yml (new file)
View File

@ -0,0 +1,35 @@
box: ruby:2.2.4

build:
  steps:
    - script:
        name: configure nokogiri gem
        code: bundle config build.nokogiri --use-system-libraries
    - bundle-install
    - script:
        name: nanoc compile
        code: bundle exec nanoc compile

deploy:
  steps:
    - s3sync:
        source_dir: output/
        delete-removed: true
        bucket-url: $AWS_BUCKET_URL
        key-id: $AWS_ACCESS_KEY_ID
        key-secret: $AWS_SECRET_ACCESS_KEY
  testing:
    - script:
        name: configure git
        code: |-
          git config --global user.email "j.cope@erambler.co.uk"
          git config --global user.name "Jez Cope"
          rm -rf .git
    - script:
        name: deploy to github pages
        code: |-
          cd output
          git init
          git add .
          git commit -m "Deploy commit from $WERCKER_STARTED_BY"
          git push -f $GIT_REMOTE master:gh-pages
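
(The `s3sync` step above mirrors `output/` to an S3 bucket, deleting anything removed locally. Outside wercker, much the same can be approximated with fog, which this site already pulls in via the Gemfile; a rough sketch under those assumptions, not the step's actual implementation:)

```ruby
require 'fog'

storage = Fog::Storage.new(
  provider:              'AWS',
  aws_access_key_id:     ENV['AWS_ACCESS_KEY_ID'],
  aws_secret_access_key: ENV['AWS_SECRET_ACCESS_KEY']
)
bucket = storage.directories.get(ENV['AWS_BUCKET']) # bucket name, not URL

# Upload every compiled file, using the path relative to output/ as the key
Dir.glob('output/**/*').select { |f| File.file?(f) }.each do |path|
  bucket.files.create(
    key:    path.sub(%r{\Aoutput/}, ''),
    body:   File.open(path),
    public: true
  )
end
```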