project page: Add option to list child talks and publications (Close #206)

* add `[params.projects]` config section to `config.toml` (see the sketch below the commit metadata)
* add `projects = [""]` array to publication and talk frontmatter
* project page: Add backwards-compatible child list that considers both the new `projects` array and the legacy `url_project`
* update archetypes
* update example site
* update publication button links to consider `projects` array
* update talk button links to consider `projects` array
* remove details button from talk links to be consistent with publication links
Dongdong Tian 2017-10-21 16:48:27 -05:00 committed by gcushen
commit 4a72683eeb
9 changed files with 203 additions and 39 deletions
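
As a quick orientation before the diffs, here is a minimal sketch of how the two new pieces fit together, assembled from values that appear elsewhere in this commit (illustrative only, not part of the diff itself): each publication or talk opts in by naming the project it belongs to, and `config.toml` controls how project pages list those children.

```toml
# content/publication/<name>.md or content/talk/<name>.md -- frontmatter excerpt.
# "deep-learning" is the filename (without '.md') of a file in content/project/.
projects = ["deep-learning"]

# config.toml -- site-level options introduced by this commit.
[params.projects]
list_children = true     # list related publications and talks on project pages
publication_format = 3   # 0 = Simple, 1 = Detailed, 2 = APA, 3 = MLA
```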

View file

@@ -25,19 +25,18 @@ publication_short = ""
abstract = ""
abstract_short = ""
# Does this page contain LaTeX math? (true/false)
math = false
# Does this page require source code highlighting? (true/false)
highlight = true
# Featured image thumbnail (optional)
image_preview = ""
# Is this a selected publication? (true/false)
selected = false
# Links (optional)
# Projects (optional).
# Associate this publication with one or more of your projects.
# Simply enter the filename (excluding '.md') of your project file in `content/project/`.
projects = [""]
# Links (optional).
url_pdf = ""
url_preprint = ""
url_code = ""
@@ -48,6 +47,12 @@ url_video = ""
url_poster = ""
url_source = ""
# Does this page contain LaTeX math? (true/false)
math = false
# Does this page require source code highlighting? (true/false)
highlight = true
# Featured image
# Place your image in the `static/img/` folder and reference its filename below, e.g. `image = "example.jpg"`.
[header]

View file

@@ -3,19 +3,36 @@ title = "{{ replace .TranslationBaseName "-" " " | title }}"
date = {{ .Date }}
draft = false
# Abstract and optional shortened version.
abstract = ""
abstract_short = ""
# Name of event and optional event URL.
event = ""
event_url = ""
# Location of event.
location = ""
# Is this a selected talk? (true/false)
selected = false
math = false
highlight = true
# Projects (optional).
# Associate this talk with one or more of your projects.
# Simply enter the filename (excluding '.md') of your project file in `content/project/`.
projects = [""]
# Links (optional).
url_pdf = ""
url_slides = ""
url_video = ""
url_code = ""
# Does the content use math formatting?
math = false
# Does the content use source code highlighting?
highlight = true
# Featured image
# Place your image in the `static/img/` folder and reference its filename below, e.g. `image = "example.jpg"`.

View file

@@ -141,6 +141,18 @@ defaultContentLanguageInSubdir = false
'Book chapter' # 6
]
# Configuration of project pages.
[params.projects]
# List publications and talks related to the project?
list_children = true
# Publication list format.
# 0 = Simple
# 1 = Detailed
# 2 = APA
# 3 = MLA
publication_format = 3
# Social/Academic Networking
#
# Icon pack "fa" includes the following social network icons:
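
Note that an entry in `projects` only resolves if a file with that basename exists under `content/project/`. A minimal sketch of such a file, assuming the example site's `deep-learning` project (the exact frontmatter fields come from the project archetype, which this commit does not touch):

```toml
+++
# content/project/deep-learning.md -- the basename "deep-learning" is what
# publications and talks reference in their `projects` array.
title = "Deep Learning"
date = "2017-01-01"
+++
```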

View file

@@ -1,30 +1,63 @@
+++
abstract = "We present a mobile visual clothing search system whereby a smart phone user can either choose a social networking photo or take a new photo of a person wearing clothing of interest and search for similar clothing in a retail database. From the query image, the person is detected, clothing is segmented, and clothing features are extracted and quantized. The information is sent from the phone client to a server, where the feature vector of the query image is used to retrieve similar clothing products from online databases. The phone's GPS location is used to re-rank results by retail store location. State of the art work focuses primarily on the recognition of a diverse range of clothing offline and pays little attention to practical applications. Evaluated on a challenging dataset, the system is relatively fast and achieves promising results."
abstract_short = "A mobile visual clothing search system is presented whereby a smart phone user can either choose a social networking image or capture a new photo of a person wearing clothing of interest and search for similar clothing in a large cloud-based ecommerce database. The phone's GPS location is used to re-rank results by retail store location, to inform the user of local stores where similar clothing items can be tried on."
authors = ["GA Cushen", "MS Nixon"]
title = "Mobile visual clothing search"
date = "2013-07-01"
image_preview = ""
math = true
# Authors. Comma separated list, e.g. `["Bob Smith", "David Jones"]`.
authors = ["GA Cushen", "MS Nixon"]
# Publication type.
# Legend:
# 0 = Uncategorized
# 1 = Conference proceedings
# 2 = Journal
# 3 = Work in progress
# 4 = Technical report
# 5 = Book
# 6 = Book chapter
publication_types = ["1"]
# Publication name and optional abbreviated version.
publication = "In *International Conference on Multimedia and Expo Workshops (ICMEW)*, IEEE."
publication_short = "In *ICMEW*"
# Abstract and optional shortened version.
abstract = "We present a mobile visual clothing search system whereby a smart phone user can either choose a social networking photo or take a new photo of a person wearing clothing of interest and search for similar clothing in a retail database. From the query image, the person is detected, clothing is segmented, and clothing features are extracted and quantized. The information is sent from the phone client to a server, where the feature vector of the query image is used to retrieve similar clothing products from online databases. The phone's GPS location is used to re-rank results by retail store location. State of the art work focuses primarily on the recognition of a diverse range of clothing offline and pays little attention to practical applications. Evaluated on a challenging dataset, the system is relatively fast and achieves promising results."
abstract_short = "A mobile visual clothing search system is presented whereby a smart phone user can either choose a social networking image or capture a new photo of a person wearing clothing of interest and search for similar clothing in a large cloud-based ecommerce database. The phone's GPS location is used to re-rank results by retail store location, to inform the user of local stores where similar clothing items can be tried on."
# Featured image thumbnail (optional)
image_preview = ""
# Is this a selected publication? (true/false)
selected = true
title = "Mobile visual clothing search"
# Projects (optional).
# Associate this publication with one or more of your projects.
# Simply enter the filename (excluding '.md') of your project file in `content/project/`.
projects = ["example-external-project"]
# Links (optional).
url_pdf = "http://eprints.soton.ac.uk/352095/1/Cushen-IMV2013.pdf"
url_preprint = "http://eprints.soton.ac.uk/352095/1/Cushen-IMV2013.pdf"
url_code = "#"
url_dataset = "#"
url_pdf = "http://eprints.soton.ac.uk/352095/1/Cushen-IMV2013.pdf"
url_project = "project/deep-learning/"
url_project = "#"
url_slides = "#"
url_video = "#"
url_poster = "#"
url_preprint = "http://eprints.soton.ac.uk/352095/1/Cushen-IMV2013.pdf"
url_source = "#"
[[url_custom]]
name = "Custom Link"
url = "http://www.example.org"
# Optional featured image (relative to `static/img/` folder).
# Does the content use math formatting?
math = true
# Does the content use source code highlighting?
highlight = true
# Featured image
# Place your image in the `static/img/` folder and reference its filename below, e.g. `image = "example.jpg"`.
[header]
image = "headers/bubbles-wide.jpg"
caption = "My caption :smile:"

View file

@@ -1,21 +1,62 @@
+++
abstract = "Person re-identification is a critical security task for recognizing a person across spatially disjoint sensors. Previous work can be computationally intensive and is mainly based on low-level cues extracted from RGB data and implemented on a PC for a fixed sensor network (such as traditional CCTV). We present a practical and efficient framework for mobile devices (such as smart phones and robots) where high-level semantic soft biometrics are extracted from RGB and depth data. By combining these cues, our approach attempts to provide robustness to noise, illumination, and minor variations in clothing. This mobile approach may be particularly useful for the identification of persons in areas ill-served by fixed sensors or for tasks where the sensor position and direction need to dynamically adapt to a target. Results on the BIWI dataset are preliminary but encouraging. Further evaluation and demonstration of the system will be available on our website."
abstract_short = ""
authors = ["GA Cushen"]
title = "A Person Re-Identification System For Mobile Devices"
date = "2015-09-01"
image_preview = ""
math = true
# Authors. Comma separated list, e.g. `["Bob Smith", "David Jones"]`.
authors = ["GA Cushen"]
# Publication type.
# Legend:
# 0 = Uncategorized
# 1 = Conference proceedings
# 2 = Journal
# 3 = Work in progress
# 4 = Technical report
# 5 = Book
# 6 = Book chapter
publication_types = ["2"]
# Publication name and optional abbreviated version.
publication = "In *Signal Image Technology & Internet Systems (SITIS)*, IEEE."
publication_short = "In *SITIS*"
# Abstract and optional shortened version.
abstract = "Person re-identification is a critical security task for recognizing a person across spatially disjoint sensors. Previous work can be computationally intensive and is mainly based on low-level cues extracted from RGB data and implemented on a PC for a fixed sensor network (such as traditional CCTV). We present a practical and efficient framework for mobile devices (such as smart phones and robots) where high-level semantic soft biometrics are extracted from RGB and depth data. By combining these cues, our approach attempts to provide robustness to noise, illumination, and minor variations in clothing. This mobile approach may be particularly useful for the identification of persons in areas ill-served by fixed sensors or for tasks where the sensor position and direction need to dynamically adapt to a target. Results on the BIWI dataset are preliminary but encouraging. Further evaluation and demonstration of the system will be available on our website."
abstract_short = ""
# Featured image thumbnail (optional)
image_preview = ""
# Is this a selected publication? (true/false)
selected = false
title = "A Person Re-Identification System For Mobile Devices"
# Projects (optional).
# Associate this publication with one or more of your projects.
# Simply enter the filename (excluding '.md') of your project file in `content/project/`.
projects = ["deep-learning"]
# Links (optional).
url_pdf = "http://arxiv.org/pdf/1512.04133v1"
url_preprint = ""
url_code = ""
url_dataset = ""
url_pdf = "http://arxiv.org/pdf/1512.04133v1"
url_project = "project/deep-learning/"
url_project = ""
url_slides = ""
url_video = ""
url_poster = ""
url_source = ""
# Does the content use math formatting?
math = true
# Does the content use source code highlighting?
highlight = true
# Featured image
# Place your image in the `static/img/` folder and reference its filename below, e.g. `image = "example.jpg"`.
[header]
image = ""
caption = ""
+++

View file

@@ -3,22 +3,36 @@ date = "2017-01-01T00:00:00"
title = "Example Talk"
abstract = ""
abstract_short = ""
event = "Hugo Academic Theme Conference"
event = "Academic Theme Conference"
event_url = "https://example.org"
location = "London, United Kingdom"
# Is this a selected talk? (true/false)
selected = false
math = true
# Projects (optional).
# Associate this talk with one or more of your projects.
# Simply enter the filename (excluding '.md') of your project file in `content/project/`.
projects = ["deep-learning"]
# Links (optional).
url_pdf = ""
url_slides = ""
url_video = ""
url_code = ""
# Optional featured image (relative to `static/img/` folder).
# Does the content use math formatting?
math = true
# Does the content use source code highlighting?
highlight = true
# Featured image
# Place your image in the `static/img/` folder and reference its filename below, e.g. `image = "example.jpg"`.
[header]
image = "headers/bubbles-wide.jpg"
caption = "My caption :smile:"
+++
Embed your slides or video here using [shortcodes](https://gcushen.github.io/hugo-academic-demo/post/writing-markdown-latex/). Further details can easily be added using *Markdown* and $\rm \LaTeX$ math code.
Embed your slides or video here using [shortcodes](https://sourcethemes.com/academic/post/writing-markdown-latex/). Further details can easily be added using *Markdown* and $\rm \LaTeX$ math code.

View file

@@ -29,11 +29,19 @@
{{ i18n "btn_dataset" }}
</a>
{{ end }}
{{ with $.Params.url_project }}
<a class="btn btn-primary btn-outline{{ if $is_list }} btn-xs{{end}}" href="{{ . | absLangURL }}">
{{ if $.Params.projects }}
{{ range $.Params.projects }}
<a class="btn btn-primary btn-outline{{ if $is_list }} btn-xs{{end}}" href="{{ printf "project/%s/" . | relURL }}">
{{ i18n "btn_project" }}
</a>
{{ end }}
{{ else }}
{{ with $.Params.url_project }}
<a class="btn btn-primary btn-outline{{ if $is_list }} btn-xs{{end}}" href="{{ . | absLangURL }}" target="_blank" rel="noopener">
{{ i18n "btn_project" }}
</a>
{{ end }}
{{ end }}
{{ with $.Params.url_poster }}
<a class="btn btn-primary btn-outline{{ if $is_list }} btn-xs{{end}}" href="{{ . | absURL }}" target="_blank" rel="noopener">
{{ i18n "btn_poster" }}

View file

@@ -1,11 +1,6 @@
{{ $is_list := .is_list }}
{{ $ := .content }}
{{ if $is_list }}
<a class="btn btn-primary btn-outline btn-xs" href="{{ $.Permalink }}">
{{ i18n "btn_details" }}
</a>
{{ end }}
{{ with $.Params.url_pdf }}
<a class="btn btn-primary btn-outline{{ if $is_list }} btn-xs{{end}}" href="{{ . | absURL }}">
{{ i18n "btn_pdf" }}
@@ -26,6 +21,11 @@
{{ i18n "btn_code" }}
</a>
{{ end }}
{{ range $.Params.projects }}
<a class="btn btn-primary btn-outline{{ if $is_list }} btn-xs{{end}}" href="{{ printf "project/%s/" . | relURL }}">
{{ i18n "btn_project" }}
</a>
{{ end }}
{{ range $.Params.url_custom }}
<a class="btn btn-primary btn-outline{{ if $is_list }} btn-xs{{end}}" href="{{ .url | absURL }}">
{{ .name }}

View file

@@ -21,8 +21,42 @@
<div class="article-style" itemprop="articleBody">
{{ .Content }}
</div>
{{ $page := . }}
{{ $project := .File.TranslationBaseName }}
{{ $project_path := printf "%s/%s/" .Section $project }}
{{ if (.Site.Params.projects.list_children | default true) }}
{{ $items := where (where .Site.RegularPages "Type" "publication") ".Params.projects" "intersect" (slice $project) }}
{{ $items := $items | union (where (where .Site.RegularPages "Type" "publication") ".Params.url_project" $project_path) }}
{{ $pubs_len := len $items }}
{{ if ge $pubs_len 1 }}
<h2>{{ (i18n "publications") }}</h2>
{{ range $items }}
{{ if eq $page.Site.Params.projects.publication_format 1 }}
{{ partial "publication_li_detailed" . }}
{{ else if eq $page.Site.Params.projects.publication_format 2 }}
{{ partial "publication_li_apa" . }}
{{ else if eq $page.Site.Params.projects.publication_format 3 }}
{{ partial "publication_li_mla" . }}
{{ else }}
{{ partial "publication_li_simple" . }}
{{ end }}
{{ end }}
{{ end }}
{{ $items := where (where .Site.RegularPages "Type" "talk") ".Params.projects" "intersect" (slice $project) }}
{{ $items := $items | union (where (where .Site.RegularPages "Type" "talk") ".Params.url_project" $project_path) }}
{{ $talks_len := len $items }}
{{ if ge $talks_len 1 }}
<h2>{{ (i18n "talks") }}</h2>
{{ range $items }}
{{ partial "talk_li_simple" . }}
{{ end }}
{{ end }}
{{ end }}
</div>
</div>
</article>
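
For existing sites the lookup in `layouts/project/single.html` stays backwards compatible: a child page is picked up either because its `projects` array contains the project's filename, or because its legacy `url_project` equals the path the template rebuilds from the project's section and filename (`printf "%s/%s/"` yields `project/deep-learning/` for the example project). A sketch of the two equivalent frontmatter styles, using values that appear in this commit:

```toml
# New style: reference the project file's basename (without '.md').
projects = ["deep-learning"]

# Old style, still honoured for backwards compatibility: the project's path.
url_project = "project/deep-learning/"
```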