diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..b6f2c626 --- /dev/null +++ b/404.html @@ -0,0 +1,1359 @@ + + + + + + + + + + + + + + + + + + Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/about/index.html b/about/index.html new file mode 100644 index 00000000..40337e04 --- /dev/null +++ b/about/index.html @@ -0,0 +1,1397 @@ + + + + + + + + + + + + + + + + + + + + + + About uPheno - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

About uPheno

+

The uPheno project aims to unify the annotation of phenotypes across species in a manner analogous to unification of gene function annotation by the Gene Ontology. +uPheno 2.0 builds on earlier efforts with a strategy that directly leverages the work of the phenotype ontology development community and incorporates phenotypes from a much wider range of species. +We have organised a collaborative community effort, including representatives of all major model organism databases, to document and align formal design patterns for representing phenotypes and further develop reference ontologies, such as PATO, which are used in these patterns. +A common development infrastructure makes it easy to use these design patterns to generate both species-specific ontologies and a species-independent layer that subsumes them. +The resulting community-curated ontology for the representation and integration of phenotypes across species serves two general purposes:
+- Providing a community-developed framework for ontology editors to bootstrap, maintain and extend their phenotype ontologies in a scalable and standardised manner.
+- Facilitating the retrieval and comparative analysis of species-specific phenotypes through a deep layer of species-independent phenotypes.

+

Currently, the development of uPheno is organized by a group that meets biweekly. See the meetings page for more info, including how to participate.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.51d95adb.min.js b/assets/javascripts/bundle.51d95adb.min.js new file mode 100644 index 00000000..b20ec683 --- /dev/null +++ b/assets/javascripts/bundle.51d95adb.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Hi=Object.create;var xr=Object.defineProperty;var Pi=Object.getOwnPropertyDescriptor;var $i=Object.getOwnPropertyNames,kt=Object.getOwnPropertySymbols,Ii=Object.getPrototypeOf,Er=Object.prototype.hasOwnProperty,an=Object.prototype.propertyIsEnumerable;var on=(e,t,r)=>t in e?xr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))Er.call(t,r)&&on(e,r,t[r]);if(kt)for(var r of kt(t))an.call(t,r)&&on(e,r,t[r]);return e};var sn=(e,t)=>{var r={};for(var n in e)Er.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&kt)for(var n of kt(e))t.indexOf(n)<0&&an.call(e,n)&&(r[n]=e[n]);return r};var Ht=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Fi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of $i(t))!Er.call(e,o)&&o!==r&&xr(e,o,{get:()=>t[o],enumerable:!(n=Pi(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Hi(Ii(e)):{},Fi(t||!e||!e.__esModule?xr(r,"default",{value:e,enumerable:!0}):r,e));var fn=Ht((wr,cn)=>{(function(e,t){typeof wr=="object"&&typeof cn!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(wr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(T){return!!(T&&T!==document&&T.nodeName!=="HTML"&&T.nodeName!=="BODY"&&"classList"in T&&"contains"in T.classList)}function f(T){var Ke=T.type,We=T.tagName;return!!(We==="INPUT"&&a[Ke]&&!T.readOnly||We==="TEXTAREA"&&!T.readOnly||T.isContentEditable)}function c(T){T.classList.contains("focus-visible")||(T.classList.add("focus-visible"),T.setAttribute("data-focus-visible-added",""))}function u(T){T.hasAttribute("data-focus-visible-added")&&(T.classList.remove("focus-visible"),T.removeAttribute("data-focus-visible-added"))}function p(T){T.metaKey||T.altKey||T.ctrlKey||(s(r.activeElement)&&c(r.activeElement),n=!0)}function m(T){n=!1}function d(T){s(T.target)&&(n||f(T.target))&&c(T.target)}function h(T){s(T.target)&&(T.target.classList.contains("focus-visible")||T.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(T.target))}function v(T){document.visibilityState==="hidden"&&(o&&(n=!0),B())}function B(){document.addEventListener("mousemove",z),document.addEventListener("mousedown",z),document.addEventListener("mouseup",z),document.addEventListener("pointermove",z),document.addEventListener("pointerdown",z),document.addEventListener("pointerup",z),document.addEventListener("touchmove",z),document.addEventListener("touchstart",z),document.addEventListener("touchend",z)}function 
re(){document.removeEventListener("mousemove",z),document.removeEventListener("mousedown",z),document.removeEventListener("mouseup",z),document.removeEventListener("pointermove",z),document.removeEventListener("pointerdown",z),document.removeEventListener("pointerup",z),document.removeEventListener("touchmove",z),document.removeEventListener("touchstart",z),document.removeEventListener("touchend",z)}function z(T){T.target.nodeName&&T.target.nodeName.toLowerCase()==="html"||(n=!1,re())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),B(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var un=Ht(Sr=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(c){return!1}},r=t(),n=function(c){var u={next:function(){var p=c.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(c){return encodeURIComponent(c).replace(/%20/g,"+")},i=function(c){return decodeURIComponent(String(c).replace(/\+/g," "))},a=function(){var c=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof c){var d=this;p.forEach(function(re,z){d.append(z,re)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),c._entries&&(c._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(f,c){typeof f!="string"&&(f=String(f)),c&&typeof c!="string"&&(c=String(c));var u=document,p;if(c&&(e.location===void 0||c!==e.location.href)){c=c.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=c,u.head.appendChild(p);try{if(p.href.indexOf(c)!==0)throw new Error(p.href)}catch(T){throw new Error("URL unable to set base "+c+" due to "+T)}}var m=u.createElement("a");m.href=f,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=f,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!c)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,B=!0,re=this;["append","delete","set"].forEach(function(T){var Ke=h[T];h[T]=function(){Ke.apply(h,arguments),v&&(B=!1,re.search=h.toString(),B=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var z=void 
0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==z&&(z=this.search,B&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},a=i.prototype,s=function(f){Object.defineProperty(a,f,{get:function(){return this._anchorElement[f]},set:function(c){this._anchorElement[f]=c},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(f){s(f)}),Object.defineProperty(a,"search",{get:function(){return this._anchorElement.search},set:function(f){this._anchorElement.search=f,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(a,{toString:{get:function(){var f=this;return function(){return f.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(f){this._anchorElement.href=f,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(f){this._anchorElement.pathname=f},enumerable:!0},origin:{get:function(){var f={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],c=this._anchorElement.port!=f&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(c?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(f){},enumerable:!0},username:{get:function(){return""},set:function(f){},enumerable:!0}}),i.createObjectURL=function(f){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(f){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr)});var Qr=Ht((Lt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Lt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Lt=="object"?Lt.ClipboardJS=r():t.ClipboardJS=r()})(Lt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return ki}});var a=i(279),s=i.n(a),f=i(370),c=i.n(f),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(O){return!1}}var d=function(O){var w=p()(O);return m("cut"),w},h=d;function v(j){var O=document.documentElement.getAttribute("dir")==="rtl",w=document.createElement("textarea");w.style.fontSize="12pt",w.style.border="0",w.style.padding="0",w.style.margin="0",w.style.position="absolute",w.style[O?"right":"left"]="-9999px";var k=window.pageYOffset||document.documentElement.scrollTop;return w.style.top="".concat(k,"px"),w.setAttribute("readonly",""),w.value=j,w}var B=function(O,w){var k=v(O);w.container.appendChild(k);var F=p()(k);return m("copy"),k.remove(),F},re=function(O){var w=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},k="";return typeof O=="string"?k=B(O,w):O instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(O==null?void 0:O.type)?k=B(O.value,w):(k=p()(O),m("copy")),k},z=re;function T(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?T=function(w){return typeof w}:T=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},T(j)}var Ke=function(){var O=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},w=O.action,k=w===void 0?"copy":w,F=O.container,q=O.target,Le=O.text;if(k!=="copy"&&k!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&T(q)==="object"&&q.nodeType===1){if(k==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(k==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Le)return z(Le,{container:F});if(q)return k==="cut"?h(q):z(q,{container:F})},We=Ke;function Ie(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(w){return typeof w}:Ie=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},Ie(j)}function Ti(j,O){if(!(j instanceof O))throw new TypeError("Cannot call a class as a function")}function nn(j,O){for(var w=0;w0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof F.action=="function"?F.action:this.defaultAction,this.target=typeof F.target=="function"?F.target:this.defaultTarget,this.text=typeof F.text=="function"?F.text:this.defaultText,this.container=Ie(F.container)==="object"?F.container:document.body}},{key:"listenClick",value:function(F){var q=this;this.listener=c()(F,"click",function(Le){return q.onClick(Le)})}},{key:"onClick",value:function(F){var q=F.delegateTarget||F.currentTarget,Le=this.action(q)||"copy",Rt=We({action:Le,container:this.container,target:this.target(q),text:this.text(q)});this.emit(Rt?"success":"error",{action:Le,text:Rt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(F){return yr("action",F)}},{key:"defaultTarget",value:function(F){var q=yr("target",F);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(F){return yr("text",F)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(F){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return z(F,q)}},{key:"cut",value:function(F){return h(F)}},{key:"isSupported",value:function(){var F=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof F=="string"?[F]:F,Le=!!document.queryCommandSupported;return q.forEach(function(Rt){Le=Le&&!!document.queryCommandSupported(Rt)}),Le}}]),w}(s()),ki=Ri},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,f){for(;s&&s.nodeType!==o;){if(typeof s.matches=="function"&&s.matches(f))return s;s=s.parentNode}}n.exports=a},438:function(n,o,i){var a=i(828);function s(u,p,m,d,h){var v=c.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function f(u,p,m,d,h){return typeof u.addEventListener=="function"?s.apply(null,arguments):typeof m=="function"?s.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return s(v,p,m,d,h)}))}function c(u,p,m,d){return function(h){h.delegateTarget=a(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=f},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(n,o,i){var a=i(879),s=i(438);function f(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required 
arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(h))throw new TypeError("Third argument must be a Function");if(a.node(m))return c(m,d,h);if(a.nodeList(m))return u(m,d,h);if(a.string(m))return p(m,d,h);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return s(document.body,m,d,h)}n.exports=f},817:function(n){function o(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var f=window.getSelection(),c=document.createRange();c.selectNodeContents(i),f.removeAllRanges(),f.addRange(c),a=f.toString()}return a}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,a,s){var f=this.e||(this.e={});return(f[i]||(f[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var f=this;function c(){f.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),f=0,c=s.length;for(f;f{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var is=/["'&<>]/;Jo.exports=as;function as(e){var t=""+e,r=is.exec(t);if(!r)return t;var n,o="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],a;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(s){a={error:s}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||s(m,d)})})}function s(m,d){try{f(n[m](d))}catch(h){p(i[0][3],h)}}function f(m){m.value instanceof Xe?Promise.resolve(m.value.v).then(c,u):p(i[0][2],m)}function c(m){s("next",m)}function u(m){s("throw",m)}function p(m,d){m(d),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mn(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof xe=="function"?xe(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(a){return new Promise(function(s,f){a=e[i](a),o(s,f,a.done,a.value)})}}function o(i,a,s,f){Promise.resolve(f).then(function(c){i({value:c,done:s})},a)}}function A(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var $t=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function De(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=xe(a),f=s.next();!f.done;f=s.next()){var c=f.value;c.remove(this)}}catch(v){t={error:v}}finally{try{f&&!f.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var u=this.initialTeardown;if(A(u))try{u()}catch(v){i=v instanceof $t?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=xe(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{dn(h)}catch(v){i=i!=null?i:[],v instanceof $t?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new $t(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)dn(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&De(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&De(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Or=Fe.EMPTY;function It(e){return e instanceof Fe||e&&"closed"in e&&A(e.remove)&&A(e.add)&&A(e.unsubscribe)}function dn(e){A(e)?e():e.unsubscribe()}var Ae={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,a=o.isStopped,s=o.observers;return i||a?Or:(this.currentObservers=null,s.push(r),new Fe(function(){n.currentObservers=null,De(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,a=n.isStopped;o?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new U;return r.source=this,r},t.create=function(r,n){return new wn(r,n)},t}(U);var wn=function(e){ne(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Or},t}(E);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ne(t,e);function t(r,n,o){r===void 
0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var n=this,o=n.isStopped,i=n._buffer,a=n._infiniteTimeWindow,s=n._timestampProvider,f=n._windowTime;o||(i.push(r),!a&&i.push(s.now()+f)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,a=o._buffer,s=a.slice(),f=0;f0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var a=r.actions;n!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Ut);var On=function(e){ne(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Wt);var we=new On(Tn);var R=new U(function(e){return e.complete()});function Dt(e){return e&&A(e.schedule)}function kr(e){return e[e.length-1]}function Qe(e){return A(kr(e))?e.pop():void 0}function Se(e){return Dt(kr(e))?e.pop():void 0}function Vt(e,t){return typeof kr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function zt(e){return A(e==null?void 0:e.then)}function Nt(e){return A(e[ft])}function qt(e){return Symbol.asyncIterator&&A(e==null?void 0:e[Symbol.asyncIterator])}function Kt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ki(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Qt=Ki();function Yt(e){return A(e==null?void 0:e[Qt])}function Gt(e){return ln(this,arguments,function(){var r,n,o,i;return Pt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,Xe(r.read())];case 3:return n=a.sent(),o=n.value,i=n.done,i?[4,Xe(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,Xe(o)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Bt(e){return A(e==null?void 0:e.getReader)}function $(e){if(e instanceof U)return e;if(e!=null){if(Nt(e))return Qi(e);if(pt(e))return Yi(e);if(zt(e))return Gi(e);if(qt(e))return _n(e);if(Yt(e))return Bi(e);if(Bt(e))return Ji(e)}throw Kt(e)}function Qi(e){return new U(function(t){var r=e[ft]();if(A(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Yi(e){return new U(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?_(function(o,i){return e(o,i,n)}):me,Oe(1),r?He(t):zn(function(){return new Xt}))}}function Nn(){for(var e=[],t=0;t=2,!0))}function fe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new E}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,f=s===void 0?!0:s;return function(c){var u,p,m,d=0,h=!1,v=!1,B=function(){p==null||p.unsubscribe(),p=void 0},re=function(){B(),u=m=void 0,h=v=!1},z=function(){var T=u;re(),T==null||T.unsubscribe()};return g(function(T,Ke){d++,!v&&!h&&B();var We=m=m!=null?m:r();Ke.add(function(){d--,d===0&&!v&&!h&&(p=jr(z,f))}),We.subscribe(Ke),!u&&d>0&&(u=new et({next:function(Ie){return We.next(Ie)},error:function(Ie){v=!0,B(),p=jr(re,o,Ie),We.error(Ie)},complete:function(){h=!0,B(),p=jr(re,a),We.complete()}}),$(T).subscribe(u))})(c)}}function jr(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function V(e,t=document){let r=se(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function se(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),N(e===_e()),Y())}function Be(e){return{x:e.offsetLeft,y:e.offsetTop}}function Yn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,we),l(()=>Be(e)),N(Be(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,we),l(()=>rr(e)),N(rr(e)))}var Bn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var 
n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!zr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),xa?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!zr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=ya.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Jn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Zn=typeof WeakMap!="undefined"?new WeakMap:new Bn,eo=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=Ea.getInstance(),n=new Ra(t,r,this);Zn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){eo.prototype[e]=function(){var t;return(t=Zn.get(this))[e].apply(t,arguments)}});var ka=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:eo}(),to=ka;var ro=new E,Ha=I(()=>H(new to(e=>{for(let t of e)ro.next(t)}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function de(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){return Ha.pipe(S(t=>t.observe(e)),x(t=>ro.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(()=>de(e)))),N(de(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var no=new E,Pa=I(()=>H(new IntersectionObserver(e=>{for(let t of e)no.next(t)},{threshold:0}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function sr(e){return Pa.pipe(S(t=>t.observe(e)),x(t=>no.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function oo(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=de(e),o=bt(e);return r>=o.height-n.height-t}),Y())}var cr={drawer:V("[data-md-toggle=drawer]"),search:V("[data-md-toggle=search]")};function io(e){return cr[e].checked}function qe(e,t){cr[e].checked!==t&&cr[e].click()}function je(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),N(t.checked))}function $a(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ia(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(N(!1))}function ao(){let 
e=b(window,"keydown").pipe(_(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:io("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),_(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!$a(n,r)}return!0}),fe());return Ia().pipe(x(t=>t?R:e))}function Me(){return new URL(location.href)}function ot(e){location.href=e.href}function so(){return new E}function co(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)co(e,r)}function M(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)co(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function fo(){return location.hash.substring(1)}function uo(e){let t=M("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Fa(){return b(window,"hashchange").pipe(l(fo),N(fo()),_(e=>e.length>0),J(1))}function po(){return Fa().pipe(l(e=>se(`[id="${e}"]`)),_(e=>typeof e!="undefined"))}function Nr(e){let t=matchMedia(e);return Zt(r=>t.addListener(()=>r(t.matches))).pipe(N(t.matches))}function lo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(N(e.matches))}function qr(e,t){return e.pipe(x(r=>r?t():R))}function ur(e,t={credentials:"same-origin"}){return ve(fetch(`${e}`,t)).pipe(ce(()=>R),x(r=>r.status!==200?Tt(()=>new Error(r.statusText)):H(r)))}function Ue(e,t){return ur(e,t).pipe(x(r=>r.json()),J(1))}function mo(e,t){let r=new DOMParser;return ur(e,t).pipe(x(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),J(1))}function pr(e){let t=M("script",{src:e});return I(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(x(()=>Tt(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),C(()=>document.head.removeChild(t)),Oe(1))))}function ho(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function bo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(ho),N(ho()))}function vo(){return{width:innerWidth,height:innerHeight}}function go(){return b(window,"resize",{passive:!0}).pipe(l(vo),N(vo()))}function yo(){return Q([bo(),go()]).pipe(l(([e,t])=>({offset:e,size:t})),J(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(X("size")),o=Q([n,r]).pipe(l(()=>Be(e)));return Q([r,t,o]).pipe(l(([{height:i},{offset:a,size:s},{x:f,y:c}])=>({offset:{x:a.x-f,y:a.y-c+i},size:s})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(a=>{let s=document.createElement("script");s.src=i,s.onload=a,document.body.appendChild(s)})),Promise.resolve())}var r=class{constructor(n){this.url=n,this.onerror=null,this.onmessage=null,this.onmessageerror=null,this.m=a=>{a.source===this.w&&(a.stopImmediatePropagation(),this.dispatchEvent(new MessageEvent("message",{data:a.data})),this.onmessage&&this.onmessage(a))},this.e=(a,s,f,c,u)=>{if(s===this.url.toString()){let p=new ErrorEvent("error",{message:a,filename:s,lineno:f,colno:c,error:u});this.dispatchEvent(p),this.onerror&&this.onerror(p)}};let o=new EventTarget;this.addEventListener=o.addEventListener.bind(o),this.removeEventListener=o.removeEventListener.bind(o),this.dispatchEvent=o.dispatchEvent.bind(o);let 
i=document.createElement("iframe");i.width=i.height=i.frameBorder="0",document.body.appendChild(this.iframe=i),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

How to cite uPheno

+

Papers

+

uPheno 2

+
    +
  • Matentzoglu N, Osumi-Sutherland D, Balhoff JP, Bello S, Bradford Y, Carmody L, Grove C, Harris MA, Harris N, Köhler S, McMurry J, Mungall C, Munoz-Torres M, Pilgrim C, Robb S, Robinson PN, Segerdell E, Vasilevsky N, Haendel M. uPheno 2: Framework for standardised representation of phenotypes across species. 2019 Apr 8. http://dx.doi.org/10.7490/f1000research.1116540.1
  • +
+

Original uPheno

+ +

Entity-Quality definitions and phenotype modelling

+ + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/contributing/index.html b/contributing/index.html new file mode 100644 index 00000000..cfb6f3ad --- /dev/null +++ b/contributing/index.html @@ -0,0 +1,1391 @@ + + + + + + + + + + + + + + + + + + + + + + Contributing - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

How to contribute to UPHENO

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/howto/add-relation-extension/index.html b/howto/add-relation-extension/index.html new file mode 100644 index 00000000..d59e4ceb --- /dev/null +++ b/howto/add-relation-extension/index.html @@ -0,0 +1,1410 @@ + + + + + + + + + + + + + + + + + + + + + + Add the uPheno direct relation extension - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

How to add the uPheno direct relation extension

+

EQ definitions are powerful tools for reconciling phenotypes across species and driving reasoning. However, they are not all that useful for many "normal" users of our ontologies.

+

We have developed a little workflow extension to take care of that.

+
    +
  1. As usual please follow the steps to install the custom uPheno Makefile extension first.
  2. +
  3. Now add a new component to your ont-odk.yaml file (e.g. src/ontology/mp-odk.yaml):
  4. +
+
components:
+  products:
+    - filename: eq-relations.owl
+
+
    +
  1. We can now choose whether we want to add the component to your edit file as well. To do that, follow the instructions on adding an import (i.e. adding the component to the edit file and catalog file); a sketch of what these additions look like is shown at the end of this section. The IRI of the component is http://purl.obolibrary.org/obo/YOURONTOLOGY/components/eq-relations.owl. For example, for MP, the IRI is http://purl.obolibrary.org/obo/mp/components/eq-relations.owl.
  2. +
  3. Now we can generate the component:
  4. +
+
sh run.sh make components/eq-relations.owl
+
+

This command will be run automatically during a release (prepare_release).

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/howto/custom-upheno-makefile/index.html b/howto/custom-upheno-makefile/index.html new file mode 100644 index 00000000..3663f348 --- /dev/null +++ b/howto/custom-upheno-makefile/index.html @@ -0,0 +1,1410 @@ + + + + + + + + + + + + + + + + + + + + + + Add custom uPheno Makefile - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Add custom uPheno Makefile

+

The custom uPheno Makefile is an extension to your normal custom Makefile (for example, hp.Makefile, mp.Makefile, etc), located in the src/ontology directory of your ODK set up.

+

To install it:

+

(1) Open your normal custom Makefile and add a line in the very end:

+
include pheno.Makefile
+
+

(2) Now download the custom Makefile:

+

https://raw.githubusercontent.com/obophenotype/upheno/master/src/ontology/config/pheno.Makefile

+

and save it in your src/ontology directory.

+

Feel free to use, for example, wget:

+
cd src/ontology
+wget https://raw.githubusercontent.com/obophenotype/upheno/master/src/ontology/config/pheno.Makefile -O pheno.Makefile
+
+

From now on you can simply run

+
sh run.sh make update_pheno_makefile
+
+

whenever you wish to synchronise the Makefile with the uPheno repo.

+

(Note: it would probably be good to add a GitHub action that does that automatically.)
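As a rough sketch of what such an automation could look like (everything here is hypothetical and not part of the current uPheno setup: the file name .github/workflows/update-pheno-makefile.yml, the weekly schedule, and the use of the peter-evans/create-pull-request action):

name: Update pheno.Makefile
on:
  schedule:
    - cron: "0 4 * * 1"   # hypothetical: once a week
  workflow_dispatch: {}
jobs:
  update:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Fetch the latest pheno.Makefile from the uPheno repository
        run: wget https://raw.githubusercontent.com/obophenotype/upheno/master/src/ontology/config/pheno.Makefile -O src/ontology/pheno.Makefile
      - name: Open a pull request with the change
        uses: peter-evans/create-pull-request@v6
        with:
          title: "Sync pheno.Makefile with upstream uPheno"
          branch: update-pheno-makefile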

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/howto/editors_workflow/index.html b/howto/editors_workflow/index.html new file mode 100644 index 00000000..42a30069 --- /dev/null +++ b/howto/editors_workflow/index.html @@ -0,0 +1,1579 @@ + + + + + + + + + + + + + + + + + + + + + + Phenotype Ontology Editors' Workflow - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Phenotype Ontology Editors' Workflow

+ +
    +
  • Phenotype Ontology Working Group Meetings agenda and minutes gdoc.
  • +
  • phenotype-ontologies slack channel: to send meeting reminders; ask for agenda items; questions; discussions etc.
  • +
  • Dead simple owl design pattern (DOS-DP) Documentation +
  • +
  • Validate DOS-DP yaml templates:
      +
    1. yamllint: yaml syntax validator +
    2. +
    3. +

      Configuring yamllint
      You can ignore the "line too long" yaml syntax errors for DOS-DP yaml templates.
      You can create a custom configuration file for yamllint in your home folder:

      touch ~/.config/yamllint/config

      The content of the config file should look like this:

      # Custom configuration file for yamllint
      # It extends the default conf by adjusting some options.

      extends: default

      rules:
        line-length:
          # max: 80  # 80 chars should be enough, but don't fail if a line is longer
          max: 140   # allow long lines
          level: warning
          allow-non-breakable-words: true
          allow-non-breakable-inline-mappings: true

      The custom config should turn the "line too long" errors into warnings.
    2. DOS-DP validator (https://incatools.github.io/dead_simple_owl_design_patterns/validator/): DOS-DP format validator. To install it (https://github.com/INCATools/dead_simple_owl_design_patterns): pip install dosdp

      +
    4. +
    +
  • +
+

Patternisation is the process of ensuring that all entity quality (EQ) descriptions from textual phenotype term definitions have a logical definition pattern. A pattern is a standard format for describing a phenotype that includes a quality and an entity. For example, "increased body size" is a pattern that includes the quality "increased" and the entity "body size." The goal of patternisation is to make the EQ descriptions more uniform and machine-readable, which facilitates downstream analysis.
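As a rough illustration (the quality and bearer terms here are assumptions for the sake of the example, not the exact terms used by any released pattern), the EQ logical definition behind a term such as "increased body size" would look something like:

'has_part' some ('increased size' and ('characteristic_of' some 'multicellular organism') and ('has_modifier' some 'abnormal'))

This mirrors the equivalentTo section of the DOS-DP template example shown further down this page.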

+ +

The first step in the Phenotype Ontology Editors' Workflow is to identify a group of related phenotypes from diverse organisms. This can be done by considering proposals from phenotype editors or by using the pattern suggestion pipeline. +The phenotype editors may propose a group of related phenotypes based on their domain knowledge, while the pattern suggestion pipeline uses semantic similarity and shared Phenotype And Trait Ontology (PATO) quality terms to identify patterns in phenotype terms from different organism-specific ontologies.

+

2. Propose a phenotype pattern

+

Once a group of related phenotypes is identified, the editors propose a phenotype pattern. To do this, they create a Github issue to request the phenotype pattern template in the uPheno repository. +Alternatively, a new template can be proposed at a phenotype editors' meeting which can lead to the creation of a new term request as a Github issue. +Ideally, the proposed phenotype pattern should include an appropriate PATO quality term for logical definition, use cases, term examples, and a textual definition pattern for the phenotype terms.

+

3. Discuss the new phenotype pattern draft at the regular uPheno phenotype editors meeting

+

The next step is to discuss the new phenotype pattern draft at the regular uPheno phenotype editors meeting. During the meeting, the editors' comments and suggestions for improvements are collected as comments on the DOS-DP yaml template in the corresponding Github pull request. Based on the feedback and discussions, a consensus on improvements should be achieved. +The DOS-DP yaml template name should start with a lower case letter, should be informative, and must include the PATO quality term. +A Github pull request is created for the DOS-DP yaml template.

+
    +
  • A DOS-DP phenotype pattern template example:
  • +
+
---
+pattern_name: ??pattern_and_file_name
+
+pattern_iri: http://purl.obolibrary.org/obo/upheno/patterns-dev/??pattern_and_file_name.yaml
+
+description: 'A description that helps people chose this pattern for the appropriate scenario.'
+
+#  examples:
+#    - example_IRI-1  # term name
+#    - example_IRI-2  # term name
+#    - example_IRI-3  # term name
+#    - http://purl.obolibrary.org/obo/XXXXXXXXXX  # XXXXXXXX
+
+contributors:
+  - https://orcid.org/XXXX-XXXX-XXXX-XXXX  # Yyy Yyyyyyyyy
+
+classes:
+  process_quality: PATO:0001236
+  abnormal: PATO:0000460
+  anatomical_entity: UBERON:0001062
+
+relations:
+  characteristic_of: RO:0000052
+  has_modifier: RO:0002573
+  has_part: BFO:0000051
+
+annotationProperties:
+  exact_synonym: oio:hasExactSynonym
+  related_synonym: oio:hasRelatedSynonym
+  xref: oio:hasDbXref
+
+vars:
+  var??: "'anatomical_entity'"  # "'variable_range'"
+
+name:
+  text: "trait ?? %s"
+  vars:
+    - var??
+
+annotations:
+  - annotationProperty: exact_synonym
+    text: "? of %s"
+    vars:
+      - var??
+
+  - annotationProperty: related_synonym
+    text: "? %s"
+    vars:
+      - var??
+
+  - annotationProperty: xref
+    text: "AUTO:patterns/patterns/chemical_role_attribute"
+
+def:
+  text: "A trait that ?? %s."
+  vars:
+    - var??
+
+equivalentTo:
+  text: "'has_part' some (
+    'XXXXXXXXXXXXXXXXX' and
+    ('characteristic_of' some %s) and
+    ('has_modifier' some 'abnormal')
+    )"
+  vars:
+    - var??
+...
+
+

4. Review the candidate phenotype pattern

+

Once a consensus on the improvements for a particular template is achieved, they are incorporated into the DOS-DP yaml file. Typically, the improvements are applied to the template some time before a subsequent ontology editor's meeting. There should be enough time for off-line review of the proposed pattern to allow community feedback. +The improved phenotype pattern candidate draft should get approval from the community at one of the regular ontology editors' call or in a Github comment. +The ontology editors who approve the pattern provide their ORCIDs and they are credited as contributors in an appropriate field of the DOS-DP pattern template.

+

5. Add the community-approved phenotype pattern template to uPheno

+

Once the community-approved phenotype pattern template is created, it is added to the uPheno Github repository.
The approved DOS-DP yaml phenotype pattern template should pass quality control (QC) steps:
1. Validate the yaml syntax with yamllint.
2. Validate the DOS-DP template with the DOSDP Validator. To validate a template using the command line interface, execute:

yamllint
dosdp validate -i

+

After successfully passing QC, the responsible editor merges the approved pull request, and the phenotype pattern becomes part of the uPheno phenotype pattern template collection.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/howto/pattern-merge-replace-workflow/index.html b/howto/pattern-merge-replace-workflow/index.html new file mode 100644 index 00000000..4db1fe7c --- /dev/null +++ b/howto/pattern-merge-replace-workflow/index.html @@ -0,0 +1,1535 @@ + + + + + + + + + + + + + + + + + + + + + + Pattern merge - replace workflow - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Pattern merge - replace workflow

+

This document describes how to merge new DOSDP design patterns into an ODK ontology and then replace the old classes with the new ones.

+

1. You need the tables in tsv format with the DOSDP filler data. Download the tsv tables to

+
$ODK-ONTOLOGY/src/patterns/data/default/
+
+

Make sure that the tsv filenames match those of the relevant yaml DOSDP pattern files.
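For example (hypothetical pattern name), a pattern defined in src/patterns/dosdp-patterns/abnormalAnatomicalEntity.yaml expects its filler table at:

$ODK-ONTOLOGY/src/patterns/data/default/abnormalAnatomicalEntity.tsv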

+

2. Add the new matching pattern yaml filename to

+
$ODK-ONTOLOGY/src/patterns/dosdp-patterns/external.txt
+
+

3. Import the new pattern templates that you have just added to the external.txt list from external sources into the current working repository

+
cd ODK-ONTOLOGY/src/ontology
+sh run.sh make update_patterns
+
+

4. make definitions.owl

+
cd ODK-ONTOLOGY/src/ontology
+sh run.sh make ../patterns/definitions.owl IMP=false
+
+

5. Remove old classes and replace them with the equivalent and patternised new classes

+
cd ODK-ONTOLOGY/src/ontology
+sh run.sh make remove_patternised_classes
+
+

6. Announce the pattern migration in an appropriate channel, for example on the phenotype-ontologies Slack channel.

+

For example:

+
+

I have migrated the ... table and changed the tab colour to blue. +You can delete the tab if you wish.

+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/howto/run-upheno2-release/index.html b/howto/run-upheno2-release/index.html new file mode 100644 index 00000000..65672b01 --- /dev/null +++ b/howto/run-upheno2-release/index.html @@ -0,0 +1,1402 @@ + + + + + + + + + + + + + + + + + + + + + + How to run a uPheno 2 release - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

How to run a uPheno 2 release

+

In order to run a release you will have to have completed the steps to set up s3.

+
    +
  1. Clone https://github.com/obophenotype/upheno-dev
  2. +
  3. cd src/scripts
  4. +
  5. sh upheno_pipeline.sh
  6. +
  7. cd ../ontology
  8. +
  9. make prepare_upload S3_VERSION=2022-06-19
  10. +
  11. make deploy S3_VERSION=2022-06-19
  12. +
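Put together, and using the same S3_VERSION value as in the example above, a release run looks roughly like this:

git clone https://github.com/obophenotype/upheno-dev
cd upheno-dev/src/scripts
sh upheno_pipeline.sh
cd ../ontology
make prepare_upload S3_VERSION=2022-06-19
make deploy S3_VERSION=2022-06-19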
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/howto/set-up-s3/index.html b/howto/set-up-s3/index.html new file mode 100644 index 00000000..f667ec4d --- /dev/null +++ b/howto/set-up-s3/index.html @@ -0,0 +1,1429 @@ + + + + + + + + + + + + + + + + + + + + + + How to set up s3 for uploading upheno data files - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

How to set yourself up for S3

+

To be able to upload a new uPheno release to the uPheno S3 bucket, you need to set yourself up for S3 first.

+
    +
  1. Download and install AWS CLI
  2. +
  3. Obtain secrets from BBOP
  4. +
  5. Add configuration for secrets
  6. +
+

1. Download and install AWS CLI

+

The most convenient way to interact with S3 is the AWS Command Line Interface (CLI). You can find the installers and install instructions on that page (different depending on your Operating System): +- For Mac +- For Windows
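Once installed, a quick way to confirm that the CLI is on your path (the exact version string will differ) is:

aws --version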

+

2. Obtain secrets from BBOP

+

Next, you need to ask someone at BBOP (such as Chris Mungall or Seth Carbon) to provide you with an account that gives you access to the BBOP s3 buckets. You will have to provide a username. You will receive: +- User name +- Access key ID +- Secret access key +- Console link to sign into the bucket

+

3. Add configuration for secrets

+

You will now have to set up your local system. You will create two files:

+
$ less ~/.aws/config 
+[default]
+region = us-east-1
+
+

and

+
$ less ~/.aws/credentials
+[default]
+aws_access_key_id = ***
+aws_secret_access_key = ***
+
+

In ~/.aws/credentials, make sure you add the correct keys as provided above.
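A simple way to check that the configuration and credentials are picked up correctly (this call only reports which identity AWS sees and changes nothing) is:

aws sts get-caller-identity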

+

4. Write to your bucket

+

Now, you should be set up to write to your s3 bucket. Note that in order for your data to be accessible through https after your upload, you need to add --acl public-read.

+
aws s3 sync --exclude "*.DS_Store*" my/data-dir s3://bbop-ontologies/myproject/data-dir --acl public-read
+
+

If you have previously pushed data to the same location, you won't be able to set it to "publicly readable" by simply rerunning the sync command. If you want to publish previously private data, follow the instructions here, e.g.:

+
aws s3api put-object-acl --bucket bbop-ontologies --key myproject/data-dir/exampleobject --acl public-read
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 00000000..40149916 --- /dev/null +++ b/index.html @@ -0,0 +1,1390 @@ + + + + + + + + + + + + + + + + + + + + Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

UPHENO Ontology Documentation

+

Welcome to the UPHENO documentation!

+

It is entirely empty at the moment so look no further!

+

You can find descriptions of the standard ontology engineering workflows here.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/ContinuousIntegration/index.html b/odk-workflows/ContinuousIntegration/index.html new file mode 100644 index 00000000..63407cd6 --- /dev/null +++ b/odk-workflows/ContinuousIntegration/index.html @@ -0,0 +1,1407 @@ + + + + + + + + + + + + + + + + + + + + + + Manage Continuous Integration - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Introduction to Continuous Integration Workflows with ODK

+

Historically, most repos have been using Travis CI for continuous integration testing and building, but due to +runtime restrictions, we recently switched a lot of our repos to GitHub actions. You can set up your repo with CI by adding +this to your configuration file (src/ontology/upheno-odk.yaml):

+
ci:
+  - github_actions
+
+

When updating your repo, you will notice a new file being added: .github/workflows/qc.yml.

+

This file contains your CI logic, so if you need to change, or add anything, this is the place!
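For orientation, the generated workflow essentially checks out the repository and runs the ODK test target inside the ODK Docker image on every push and pull request. A minimal sketch of such a file (the exact content generated by your ODK version will differ) could look like:

name: CI
on: [push, pull_request]
jobs:
  ontology_qc:
    runs-on: ubuntu-latest
    container: obolibrary/odkfull:latest
    steps:
      - uses: actions/checkout@v4
      - name: Run ontology QC checks
        run: cd src/ontology && make test IMP=false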

+

Alternatively, if your repo is in GitLab instead of GitHub, you can set up your repo with GitLab CI by adding +this to your configuration file (src/ontology/upheno-odk.yaml):

+
ci:
+  - gitlab-ci
+
+

This will add a file called .gitlab-ci.yml in the root of your repo.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/EditorsWorkflow/index.html b/odk-workflows/EditorsWorkflow/index.html new file mode 100644 index 00000000..b5a6bf4d --- /dev/null +++ b/odk-workflows/EditorsWorkflow/index.html @@ -0,0 +1,1713 @@ + + + + + + + + + + + + + + + + + + + + + + Editors Workflow - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Editors Workflow

+

The editors workflow is one of the formal workflows to ensure that the ontology is developed correctly according to ontology engineering principles. There are a few different editors workflows:

+
    +
  1. Local editing workflow: Editing the ontology in your local environment by hand, using tools such as Protégé, ROBOT templates or DOSDP patterns.
  2. +
  3. Completely automated data pipeline (GitHub Actions)
  4. +
  5. DROID workflow
  6. +
+

This document only covers the first editing workflow; more will be added in the future.

+

Local editing workflow

+

Workflow requirements:

+
    +
  • git
  • +
  • github
  • +
  • docker
  • +
  • editing tool of choice, e.g. Protégé, your favourite text editor, etc
  • +
+

1. Create issue

+

Ensure that there is a ticket on your issue tracker that describes the change you are about to make. While this seems optional, this is a very important part of the social contract of building an ontology - no change to the ontology should be performed without a good ticket, describing the motivation and nature of the intended change.

+

2. Update main branch

+

In your local environment (e.g. your laptop), make sure you are on the main (prev. master) branch and ensure that you have all the upstream changes, for example:

+
git checkout master
+git pull
+
+

3. Create feature branch

+

Create a new branch. Per convention, we try to use meaningful branch names such as: +- issue23removeprocess (where issue 23 is the related issue on GitHub) +- issue26addcontributor +- release20210101 (for releases)

+

On your command line, this looks like this:

+
git checkout -b issue23removeprocess
+
+

4. Perform edit

+

Using your editor of choice, perform the intended edit. For example:

+

Protégé

+
    +
  1. Open src/ontology/upheno-edit.owl in Protégé
  2. +
  3. Make the change
  4. +
  5. Save the file
  6. +
+

TextEdit

+
    +
  1. Open src/ontology/upheno-edit.owl in TextEdit (or Sublime, Atom, Vim, Nano)
  2. +
  3. Make the change
  4. +
  5. Save the file
  6. +
+

Consider the following when making the edit.

+
    +
  1. According to our development philosophy, the only places that should be manually edited are:
      +
    • src/ontology/upheno-edit.owl
    • +
    • Any ROBOT templates you chose to use (the TSV files only)
    • +
    • Any DOSDP data tables you chose to use (the TSV files, and potentially the associated patterns)
    • +
    • components (anything in src/ontology/components), see here.
    • +
    +
  2. +
  3. Imports should not be edited (any edits will be flushed out with the next update). However, refreshing imports is a potentially breaking change - and is discussed elsewhere.
  4. +
  5. Changes should usually be small. Adding or changing 1 term is great. Adding or changing 10 related terms is ok. Adding or changing 100 or more terms at once should be considered very carefully.
  6. +
+

4. Check the Git diff

+

This step is very important. Rather than simply trusting your change had the intended effect, we should always use a git diff as a first pass for sanity checking.

+

In our experience, having a visual git client like GitHub Desktop or sourcetree is really helpful for this part. In case you prefer the command line:

+
git status
+git diff
+
+

5. Quality control

+

Now it's time to run your quality control checks. This can either happen locally (5a) or through your continuous integration system (7/5b).

+

5a. Local testing

+

If you chose to run your test locally:

+
sh run.sh make IMP=false test
+
+

This will run the whole set of configured ODK tests on your ontology, including your change. If you have a complex DOSDP pattern pipeline you may want to add PAT=false to skip the potentially lengthy process of rebuilding the patterns.

+
sh run.sh make IMP=false PAT=false test
+
+

6. Pull request

+

When you are happy with the changes, you commit your changes to your feature branch, push them upstream (to GitHub) and create a pull request. For example:

+
git add NAMEOFCHANGEDFILES
+git commit -m "Added biological process term #12"
+git push -u origin issue23removeprocess
+
+

Then you go to your project on GitHub, and create a new pull request from the branch, for example: https://github.com/INCATools/ontology-development-kit/pulls

+

There is a lot of great advice on how to write pull requests, but at the very least you should: +- mention the tickets affected: see #23 to link to a related ticket, or fixes #23 if, by merging this pull request, the ticket is fixed. Tickets in the latter case will be closed automatically by GitHub when the pull request is merged. +- summarise the changes in a few sentences. Consider the reviewer: what would they want to know right away? +- If the diff is large, provide instructions on how best to review the pull request (sometimes there are many changed files, but only one important change).
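A hypothetical example of a minimal but useful pull request description (the ticket number and change are invented for illustration):

Fixes #23

Removes the obsolete 'process' grouping class and re-parents its children
under 'biological process phenotype'.

Review hint: only src/ontology/upheno-edit.owl was edited by hand; the large
diff under imports/ comes from an automatic imports refresh and can be skimmed.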

+

7/5b. Continuous Integration Testing

+

If you didn't run any local quality control checks (see 5a), you should have Continuous Integration (CI) set up, for example: +- Travis +- GitHub Actions

+

More on how to set this up here. Once the pull request is created, the CI will automatically trigger. If all is fine, it will show up green, otherwise red.

+

8. Community review

+

Once all the automatic tests have passed, it is important to put a second set of eyes on the pull request. Ontologies are inherently social - they represent some kind of community consensus on how a domain is conceptually organised. This may sound high-brow, but it is very important that, as an ontology editor, you have your work validated by the community you are trying to serve (e.g. your colleagues, other contributors etc.). In our experience, it is hard to get more than one review on a pull request - two is great. You can set up GitHub branch protection to actually require a review before a pull request can be merged! We recommend this.

+

This step can seem daunting for under-resourced ontologies, but we recommend putting it high up on your list of priorities - train a colleague, reach out!

+

9. Merge and cleanup

+

When the QC is green and the reviews are in (approvals), it is time to merge the pull request. After the pull request is merged, remember to delete the branch as well (this option will show up as a big button right after you have merged the pull request). If you have not done so, close all the associated tickets fixed by the pull request.

+

10. Changelog (Optional)

+

It is sometimes difficult to keep track of changes made to an ontology. Some ontology teams opt to document changes in a changelog (simply a text file in your repository) so that when release day comes, you know everything you have changed. This is advisable at least for major changes (such as a new release system, a new pattern or template etc.).
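If you opt for this, a plain CHANGELOG.md at the repository root is enough; the entries below are purely hypothetical examples:

## Unreleased
- Added a new design pattern for abnormal anatomical entity morphology (#123)
- Re-parented several neuron phenotype classes (#130)
- Switched the release pipeline to base merging (#131)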

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/ManageAutomatedTest/index.html b/odk-workflows/ManageAutomatedTest/index.html new file mode 100644 index 00000000..372cd8dd --- /dev/null +++ b/odk-workflows/ManageAutomatedTest/index.html @@ -0,0 +1,1486 @@ + + + + + + + + + + + + + + + + + + + + + + Manage automated tests - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Manage automated tests

+ +

Constraint violation checks

+

We can define custom checks using SPARQL. Each SPARQL query describes a bad modelling pattern (missing labels, misspelt URIs, and many more) in the ontology. If any of these queries return results, the build will fail. Custom checks are designed to be run as part of GitHub Actions Continuous Integration testing, but they can also be run locally.
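For example, a hypothetical check src/sparql/missing-label-violation.sparql (the file name and query are illustrative, not an existing uPheno check) could flag named classes without an rdfs:label:

PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>

# every result row is a violation: a named class without a label
SELECT DISTINCT ?entity WHERE {
  ?entity a owl:Class .
  FILTER NOT EXISTS { ?entity rdfs:label ?label }
  FILTER (!isBlank(?entity))
}

Following the steps below, this check would then be registered as missing-label under custom_sparql_checks.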

+

Steps to add a constraint violation check:

+
    +
  1. Add the SPARQL query in src/sparql. The name of the file should end with -violation.sparql. Please give a name that helps to understand which violation the query wants to check.
  2. +
  3. Add the name of the new file to the ODK configuration file src/ontology/upheno-odk.yaml:
      +
    1. Include the name of the file (without the -violation.sparql part) to the list inside the key custom_sparql_checks that is inside robot_report key.
    2. +
    3. +

      If the robot_report or custom_sparql_checks keys are not available, please add this code block to the end of the file.

      +

      robot_report:
        release_reports: False
        fail_on: ERROR
        use_labels: False
        custom_profile: True
        report_on:
          - edit
        custom_sparql_checks:
          - name-of-the-file-check

3. Update the repository so your new SPARQL check will be included in the QC.

      +
    4. +
    +
  4. +
+
sh run.sh make update_repo
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/ManageDocumentation/index.html b/odk-workflows/ManageDocumentation/index.html new file mode 100644 index 00000000..529d2b84 --- /dev/null +++ b/odk-workflows/ManageDocumentation/index.html @@ -0,0 +1,1513 @@ + + + + + + + + + + + + + + + + + + + + + + Manage documentation - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Updating the Documentation

+

The documentation for UPHENO is managed in two places (relative to the repository root):

+
    +
  1. The docs directory contains all the files that pertain to the content of the documentation (more below)
  2. +
  3. the mkdocs.yaml file contains the documentation config, in particular its navigation bar and theme.
  4. +
+

The documentation is hosted using GitHub pages, on a special branch of the repository (called gh-pages). It is important that this branch is never deleted - it contains all the files GitHub pages needs to render and deploy the site. It is also important to note that the gh-pages branch should never be edited manually. All changes to the docs happen inside the docs directory on the main branch.

+

Editing the docs

+

Changing content

+

All the documentation is contained in the docs directory, and is managed in Markdown. Markdown is a very simple and convenient way to produce text documents with formatting instructions, and is very easy to learn - it is also used, for example, in GitHub issues. This is a normal editing workflow:

+
    +
  1. Open the .md file you want to change in an editor of choice (a simple text editor is often best). IMPORTANT: Do not edit any files in the docs/odk-workflows/ directory. These files are managed by the ODK system and will be overwritten when the repository is upgraded! If you wish to change these files, make an issue on the ODK issue tracker.
  2. +
  3. Perform the edit and save the file
  4. +
  5. Commit the file to a branch, and create a pull request as usual.
  6. +
  7. If your development team likes your changes, merge the docs into master branch.
  8. +
  9. Deploy the documentation (see below)
  10. +
+

Deploy the documentation

+

The documentation is not automatically updated from the Markdown, and needs to be deployed deliberately. To do this, perform the following steps:

+
    +
  1. In your terminal, navigate to the edit directory of your ontology, e.g.: + cd upheno/src/ontology
  2. +
  3. Now you are ready to build the docs as follows: + sh run.sh make update_docs + Mkdocs now sets off to build the site from the markdown pages. You will be asked to
      +
    • Enter your username
    • +
    • Enter your password (see here for using GitHub access tokens instead) + IMPORTANT: Using password based authentication will be deprecated this year (2021). Make sure you read up on personal access tokens if that happens!
    • +
    +
  4. +
+

If everything was successful, you will see a message similar to this one:

+

INFO - Your documentation should shortly be available at: https://obophenotype.github.io/upheno/

3. Just to double-check, you can now navigate to your documentation pages (usually https://obophenotype.github.io/upheno/). Just make sure you give GitHub 2-5 minutes to build the pages!

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/ReleaseWorkflow/index.html b/odk-workflows/ReleaseWorkflow/index.html new file mode 100644 index 00000000..73222d6f --- /dev/null +++ b/odk-workflows/ReleaseWorkflow/index.html @@ -0,0 +1,1599 @@ + + + + + + + + + + + + + + + + + + + + + + Release Workflow - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

The release workflow

+

The release workflow recommended by the ODK is based on GitHub releases and works as follows:

+
    +
  1. Run a release with the ODK
  2. +
  3. Review the release
  4. +
  5. Merge to main branch
  6. +
  7. Create a GitHub release
  8. +
+

These steps are outlined in detail in the following.

+

Run a release with the ODK

+

Preparation:

+
    +
  1. Ensure that all your pull requests are merged into your main (master) branch
  2. +
  3. Make sure that all changes to master are committed to GitHub (git status should say that there are no modified files)
  4. +
  5. Locally make sure you have the latest changes from master (git pull)
  6. +
  7. Checkout a new branch (e.g. git checkout -b release-2021-01-01)
  8. +
  9. You may or may not want to refresh your imports as part of your release strategy (see here)
  10. +
  11. Make sure you have the latest ODK installed by running docker pull obolibrary/odkfull
  12. +
+

To actually run the release, you:

+
    +
  1. Open a command line terminal window and navigate to the src/ontology directory (cd upheno/src/ontology)
  2. +
  3. Run release pipeline:sh run.sh make prepare_release -B. Note that for some ontologies, this process can take up to 90 minutes - especially if there are large ontologies you depend on, like PRO or CHEBI.
  4. +
  5. If everything went well, you should see the following output on your machine: Release files are now in ../.. - now you should commit, push and make a release on your git hosting site such as GitHub or GitLab.
  6. +
+

This will create all the specified release targets (OBO, OWL, JSON, and the variants, ont-full and ont-base) and copy them into your release directory (the top level of your repo).

+

Review the release

+
    +
  1. (Optional) Rough check. This step is frequently skipped, but for the more paranoid among us (like the author of this doc), this is a 3-minute additional effort for some peace of mind. Open the main release (upheno.owl) in your favourite development environment (e.g. Protégé) and eyeball the hierarchy. We recommend two simple checks:
      +
    1. Does the very top level of the hierarchy look ok? This means that all new terms have been imported/updated correctly.
    2. +
    3. Does at least one change that you know should be in this release appear? For example, a new class. This means that the release was actually based on the recent edit file.
    4. +
    +
  2. +
  3. Commit your changes to the branch and make a pull request
  4. +
  5. In your GitHub pull request, review the following three files in detail (based on our experience):
      +
    1. upheno.obo - this reflects a useful subset of the whole ontology (everything that can be covered by OBO format). OBO format has that speaking for it: it is very easy to review!
    2. +
    3. upheno-base.owl - this reflects the asserted axioms in your ontology that you have actually edited.
    4. +
    5. Ideally also take a look at upheno-full.owl, which may reveal interesting new inferences you did not know about. Note that the diff of this file is sometimes quite large.
    6. +
    +
  6. +
  7. Like with every pull request, we recommend to always employ a second set of eyes when reviewing a PR!
  8. +
+

Merge the main branch

+

Once your CI checks have passed, and your reviews are completed, you can now merge the branch into your main branch (don't forget to delete the branch afterwards - a big button will appear after the merge is finished).

+

Create a GitHub release

+
    +
  1. Go to your releases page on GitHub by navigating to your repository, and then clicking on releases (usually on the right, for example: https://github.com/obophenotype/upheno/releases). Then click "Draft new release"
  2. +
  3. As the tag version you need to choose the date on which your ontologies were built. You can find this, for example, by looking at the upheno.obo file and checking the data-version: property (see the command sketch after this list). The date needs to be prefixed with a v, so, for example, v2020-02-06.
  4. +
  5. You can write whatever you want in the release title, but we typically write the date again. The description underneath should contain a concise list of changes or term additions.
  6. +
  7. Click "Publish release". Done.
  8. +
+
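A quick way to look that date up on the command line (a sketch; it assumes the release files have already been copied to the repository root):

grep "^data-version:" upheno.obo
# prints something like: data-version: releases/2020-02-06  ->  use tag v2020-02-06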

Debugging typical ontology release problems

+

Problems with memory

+

When you are dealing with large ontologies, you need a lot of memory. When you see error messages relating to large ontologies such as CHEBI, PRO, NCBITAXON, or Uberon, you should think of memory first, see here.

+

Problems when using OBO format based tools

+

Sometimes you will get cryptic error messages when using legacy tools using OBO format, such as the ontology release tool (OORT), which is also available as part of the ODK docker container. In these cases, you need to track down what axiom or annotation actually caused the breakdown. In our experience (in about 60% of the cases) the problem lies with duplicate annotations (def, comment) which are illegal in OBO. Here is an example recipe of how to deal with such a problem:

+
    +
  1. If you get a message like make: *** [cl.Makefile:84: oort] Error 255 you might have an OORT error.
  2. +
  3. To debug this, in your terminal enter sh run.sh make IMP=false PAT=false oort -B (assuming you are already in the ontology folder in your directory)
  4. +
  5. This should show you where the error is in the log (e.g. multiple different definitions). +WARNING: THE FIX BELOW IS NOT IDEAL, YOU SHOULD ALWAYS TRY TO FIX UPSTREAM IF POSSIBLE
  6. +
  7. Open upheno-edit.owl in Protégé, find the offending term and delete all offending axioms (e.g. delete ALL definitions, if the problem was "multiple def tags not allowed") and save. +*While this is not ideal, as it will remove all definitions from that term, they will be added back again once the term is fixed in the ontology it was imported from and re-imported.
  8. +
  9. Rerun sh run.sh make IMP=false PAT=false oort -B and if it all passes, commit your changes to a branch and make a pull request as usual.
  10. +
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/RepoManagement/index.html b/odk-workflows/RepoManagement/index.html new file mode 100644 index 00000000..c6d71f0a --- /dev/null +++ b/odk-workflows/RepoManagement/index.html @@ -0,0 +1,1716 @@ + + + + + + + + + + + + + + + + + + + + + + Manage your ODK Repository - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + +

Managing your ODK repository

+

Updating your ODK repository

+

Your ODK repository's configuration is managed in src/ontology/upheno-odk.yaml. Once you have made your changes, you can run the following to apply your changes to the repository:

+
sh run.sh make update_repo
+
+

There are a large number of options that can be set to configure your ODK, but we will only discuss a few of them here.

+

NOTE for Windows users:

+

You may get a cryptic failure such as Set Illegal Option - if the update script located in src/scripts/update_repo.sh +was saved using Windows line endings. These need to be changed to Unix line endings. In Notepad++, for example, you can +click on Edit->EOL Conversion->Unix LF to change this.
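If you prefer the command line over Notepad++, a one-liner along these lines usually does the job (assuming dos2unix or GNU sed is available on your machine):

# convert CRLF (Windows) line endings to LF (Unix)
dos2unix src/scripts/update_repo.sh
# or, with GNU sed:
sed -i 's/\r$//' src/scripts/update_repo.sh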

+

Managing imports

+

You can use the update repository workflow described on this page to perform the following operations to your imports:

+
    +
  1. Add a new import
  2. +
  3. Modify an existing import
  4. +
  5. Remove an import you no longer want
  6. +
  7. Customise an import
  8. +
+

We will discuss all these workflows in the following.

+

Add new import

+

To add a new import, you first edit your odk config as described above, adding an id to the product list in the import_group section (for the sake of this example, we assume you already import RO, and your goal is to also import GO):

+
import_group:
+  products:
+    - id: ro
+    - id: go
+
+

Note: your ODK file should only have one import_group, which can contain multiple imports (in the products section). Next, you run the update repo workflow to apply these changes. Note that by default, this module is going to be a SLME Bottom module, see here. To change that or customise your module, see section "Customise an import". To finalise the addition of your import, perform the following steps:

+
    +
  1. Add an import statement to your src/ontology/upheno-edit.owl file. We suggest to do this using a text editor, by simply copying an existing import declaration and renaming it to the new ontology import, for example as follows: + ... + Ontology(<http://purl.obolibrary.org/obo/upheno.owl> + Import(<http://purl.obolibrary.org/obo/upheno/imports/ro_import.owl>) + Import(<http://purl.obolibrary.org/obo/upheno/imports/go_import.owl>) + ...
  2. +
  3. Add your imports redirect to your catalog file src/ontology/catalog-v001.xml, for example: + <uri name="http://purl.obolibrary.org/obo/upheno/imports/go_import.owl" uri="imports/go_import.owl"/>
  4. +
  5. Test whether everything is in order:
      +
    1. Refresh your import
    2. +
    3. Open in your Ontology Editor of choice (Protege) and ensure that the expected terms are imported.
    4. +
    +
  6. +
+

Note: The catalog file src/ontology/catalog-v001.xml has one purpose: redirecting +imports from URLs to local files. For example, if you have

+
Import(<http://purl.obolibrary.org/obo/upheno/imports/go_import.owl>)
+
+

in your editors file (the ontology) and

+
<uri name="http://purl.obolibrary.org/obo/upheno/imports/go_import.owl" uri="imports/go_import.owl"/>
+
+

in your catalog, tools like robot or Protégé will recognize the statement +in the catalog file to redirect the URL http://purl.obolibrary.org/obo/upheno/imports/go_import.owl +to the local file imports/go_import.owl (which is in your src/ontology directory).

+

Modify an existing import

+

If you simply wish to refresh your import in light of new terms, see here. If you wish to change the type of your module see section "Customise an import".

+

Remove an existing import

+

To remove an existing import, perform the following steps:

+
    +
  1. remove the import declaration from your src/ontology/upheno-edit.owl.
  2. +
  3. remove the id from your src/ontology/upheno-odk.yaml, eg. - id: go from the list of products in the import_group.
  4. +
  5. run update repo workflow
  6. +
  7. delete the associated files manually:
      +
    • src/imports/go_import.owl
    • +
    • src/imports/go_terms.txt
    • +
    +
  8. +
  9. Remove the respective entry from the src/ontology/catalog-v001.xml file.
  10. +
+

Customise an import

+

By default, an import module extracted from a source ontology will be a SLME module, see here. There are various options to change the default.

+

The following change to your repo config (src/ontology/upheno-odk.yaml) will switch the go import from an SLME module to a simple ROBOT filter module:

+
import_group:
+  products:
+    - id: ro
+    - id: go
+      module_type: filter
+
+

A ROBOT filter module is, essentially, importing all external terms declared by your ontology (see here on how to declare external terms to be imported). Note that the filter module does +not consider terms/annotations from namespaces other than the base-namespace of the ontology itself. For example, in the +example of GO above, only annotations / axioms related to the GO base IRI (http://purl.obolibrary.org/obo/GO_) would be considered. This +behaviour can be changed by adding additional base IRIs as follows:

+
import_group:
+  products:
+    - id: go
+      module_type: filter
+      base_iris:
+        - http://purl.obolibrary.org/obo/GO_
+        - http://purl.obolibrary.org/obo/CL_
+        - http://purl.obolibrary.org/obo/BFO
+
+

If you wish to customise your import entirely, you can specify your own ROBOT command to do so. To do that, add the following to your repo config (src/ontology/upheno-odk.yaml):

+
import_group:
+  products:
+    - id: ro
+    - id: go
+      module_type: custom
+
+

Now add a new goal in your custom Makefile (src/ontology/upheno.Makefile, not src/ontology/Makefile).

+
imports/go_import.owl: mirror/ro.owl imports/ro_terms_combined.txt
+    if [ $(IMP) = true ]; then $(ROBOT) query  -i $< --update ../sparql/preprocess-module.ru \
+        extract -T imports/ro_terms_combined.txt --force true --individuals exclude --method BOT \
+        query --update ../sparql/inject-subset-declaration.ru --update ../sparql/postprocess-module.ru \
+        annotate --ontology-iri $(ONTBASE)/$@ $(ANNOTATE_ONTOLOGY_VERSION) --output $@.tmp.owl && mv $@.tmp.owl $@; fi
+
+

Now feel free to change this goal to do whatever you wish it to do! It probably makes some sense (albeit not being a strict necessity) to leave most of the goal intact and replace only:

+
extract -T imports/ro_terms_combined.txt --force true --individuals exclude --method BOT \
+
+

to another ROBOT pipeline.

+

Add a component

+

A component is an import which belongs to your ontology, e.g. is managed by +you and your team.

+
    +
  1. Open src/ontology/upheno-odk.yaml
  2. +
  3. If you don't have it yet, add a new top-level section components
  4. +
  5. Under the components section, add a new section called products. +This is where all your components are specified
  6. +
  7. Under the products section, add a new component, e.g. - filename: mycomp.owl
  8. +
+

Example

+
components:
+  products:
+    - filename: mycomp.owl
+
+

When running sh run.sh make update_repo, a new file src/ontology/components/mycomp.owl will +be created which you can edit as you see fit. Typical ways to edit:

+
    +
  1. Using a ROBOT template to generate the component (see below)
  2. +
  3. Manually curating the component separately with Protégé or any other editor
  4. +
  5. Providing a components/mycomp.owl: make target in src/ontology/upheno.Makefile +and provide a custom command to generate the component
      +
    • WARNING: Note that the custom rule to generate the component MUST NOT depend on any other ODK-generated file such as seed files and the like (see issue).
    • +
    +
  6. +
  7. Providing an additional attribute for the component in src/ontology/upheno-odk.yaml, source, +to specify that this component should simply be downloaded from somewhere on the web.
  8. +
+

Adding a new component based on a ROBOT template

+

Since ODK 1.3.2, it is possible to simply link a ROBOT template to a component without having to specify any of the import logic. In order to add a new component that is connected to one or more template files, follow these steps:

+
    +
  1. Open src/ontology/upheno-odk.yaml.
  2. +
  3. Make sure that use_templates: TRUE is set in the global project options. You should also make sure that use_context: TRUE is set in case you are using prefixes in your templates that are not known to robot, such as OMOP:, CPONT: and more. All non-standard prefixes you are using should be added to config/context.json.
  4. +
  5. Add another component to the products section.
  6. +
  7. To activate this component to be template-driven, simply say: use_template: TRUE. This will create an empty template for you in the templates directory, which will automatically be processed when recreating the component (e.g. run.bat make recreate-mycomp).
  8. +
  9. If you want to use more than one component, use the templates field to add as many template names as you wish. ODK will look for them in the src/templates directory.
  10. +
  11. Advanced: If you want to provide additional processing options, you can use the template_options field. This should be a string with option from robot template. One typical example for additional options you may want to provide is --add-prefixes config/context.json to ensure the prefix map of your context is provided to robot, see above.
  12. +
+

Example:

+
components:
+  products:
+    - filename: mycomp.owl
+      use_template: TRUE
+      template_options: --add-prefixes config/context.json
+      templates:
+        - template1.tsv
+        - template2.tsv
+
+

Note: if your mirror is particularly large and complex, read this ODK recommendation.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/RepositoryFileStructure/index.html b/odk-workflows/RepositoryFileStructure/index.html new file mode 100644 index 00000000..bd7754e2 --- /dev/null +++ b/odk-workflows/RepositoryFileStructure/index.html @@ -0,0 +1,1679 @@ + + + + + + + + + + + + + + + + + + + + + + Your ODK Repository Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Repository structure

+

The main kinds of files in the repository:

+
    +
  1. Release files
  2. +
  3. Imports
  4. +
  5. Components
  6. +
+

Release files

+

Release files are the files that are considered part of the official ontology release and are meant to be used by the community. A detailed description of the release artefacts can be found here.

+

Imports

+

Imports are subsets of external ontologies that contain terms and axioms you would like to re-use in your ontology. These are considered "external", like dependencies in software development, and are not included in your "base" product, which is the release artefact which contains only those axioms that you personally maintain.

+

These are the current imports in UPHENO

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Import | URL | Type
go | https://raw.githubusercontent.com/obophenotype/pro_obo_slim/master/pr_slim.owl | None
nbo | http://purl.obolibrary.org/obo/nbo.owl | None
uberon | http://purl.obolibrary.org/obo/uberon.owl | None
cl | http://purl.obolibrary.org/obo/cl.owl | None
pato | http://purl.obolibrary.org/obo/pato.owl | None
mpath | http://purl.obolibrary.org/obo/mpath.owl | None
ro | http://purl.obolibrary.org/obo/ro.owl | None
omo | http://purl.obolibrary.org/obo/omo.owl | None
chebi | https://raw.githubusercontent.com/obophenotype/chebi_obo_slim/main/chebi_slim.owl | None
oba | http://purl.obolibrary.org/obo/oba.owl | None
ncbitaxon | http://purl.obolibrary.org/obo/ncbitaxon/subsets/taxslim.owl | None
pr | https://raw.githubusercontent.com/obophenotype/pro_obo_slim/master/pr_slim.owl | None
bspo | http://purl.obolibrary.org/obo/bspo.owl | None
ncit | http://purl.obolibrary.org/obo/ncit.owl | None
fbbt | http://purl.obolibrary.org/obo/fbbt.owl | None
fbdv | http://purl.obolibrary.org/obo/fbdv.owl | None
hsapdv | http://purl.obolibrary.org/obo/hsapdv.owl | None
wbls | http://purl.obolibrary.org/obo/wbls.owl | None
wbbt | http://purl.obolibrary.org/obo/wbbt.owl | None
plana | http://purl.obolibrary.org/obo/plana.owl | None
zfa | http://purl.obolibrary.org/obo/zfa.owl | None
xao | http://purl.obolibrary.org/obo/xao.owl | None
hsapdv-uberon | http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-hsapdv.owl | custom
zfa-uberon | http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-zfa.owl | custom
zfs-uberon | http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-zfs.owl | custom
xao-uberon | http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-xao.owl | custom
wbbt-uberon | http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-wbbt.owl | custom
wbls-uberon | http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-wbls.owl | custom
fbbt-uberon | http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-fbbt.owl | custom
xao-cl | http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-xao.owl | custom
wbbt-cl | http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-wbbt.owl | custom
fbbt-cl | http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-fbbt.owl | custom
+

Components

+

Components, in contrast to imports, are considered full members of the ontology. This means that any axiom in a component is also included in the ontology base - which means it is considered native to the ontology. While this sounds complicated, consider this: conceptually, no component should be part of more than one ontology. If that seems to be the case, we are most likely talking about an import. Components are often not needed for ontologies, but there are some use cases:

+
    +
  1. There is an automated process that generates and re-generates a part of the ontology
  2. +
  3. A part of the ontology is managed in ROBOT templates
  4. +
  5. The expressivity of the component is higher than the format of the edit file. For example, people still choose to manage their ontology in OBO format (they should not), missing out on a lot of OWL features. They may choose to manage logic that is beyond OBO in a specific OWL component.
  6. +
+

These are the components in UPHENO

+ + + + + + + + + + + + + + + + + + + + + +
Filename | URL
phenotypes_manual.owl | None
upheno-mappings.owl | None
cross-species-mappings.owl | None
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/SettingUpDockerForODK/index.html b/odk-workflows/SettingUpDockerForODK/index.html new file mode 100644 index 00000000..7b74632b --- /dev/null +++ b/odk-workflows/SettingUpDockerForODK/index.html @@ -0,0 +1,1402 @@ + + + + + + + + + + + + + + + + + + + + + + Setting up Docker for ODK - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Setting up your Docker environment for ODK use

+

One of the most frequent problems with running the ODK for the first time is failure because of lack of memory. This can look like a Java OutOfMemory exception, +but more often than not it will appear as something like an Error 137. There are two places you need to consider to set your memory:

+
    +
  1. Your src/ontology/run.sh (or run.bat) file. You can set the memory there by adding +robot_java_args: '-Xmx8G' to your src/ontology/upheno-odk.yaml file, see for example here, and the sketch after this list.
  2. +
  3. Set your docker memory. By default, it should be about 10-20% more than your robot_java_args variable. You can manage your memory settings +by right-clicking on the docker whale in your system bar-->Preferences-->Resources-->Advanced, see picture below.
  4. +
+
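For example, the relevant fragment of src/ontology/upheno-odk.yaml might look like the sketch below (the 8G value is only an illustration; pick a value that fits the memory you give Docker). The change is then applied by re-running sh run.sh make update_repo.

# fragment of src/ontology/upheno-odk.yaml (sketch)
robot_java_args: '-Xmx8G'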

[Image: Docker memory settings in the Docker Desktop preferences (Resources -> Advanced)]

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/UpdateImports/index.html b/odk-workflows/UpdateImports/index.html new file mode 100644 index 00000000..ae534813 --- /dev/null +++ b/odk-workflows/UpdateImports/index.html @@ -0,0 +1,1656 @@ + + + + + + + + + + + + + + + + + + + + + + Manage imports - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Update Imports Workflow

+

This page discusses how to update the contents of your imports, like adding or removing terms. If you are looking to customise imports, like changing the module type, see here.

+

Importing a new term

+

Note: some ontologies now use a merged-import system to manage dynamic imports; for these, please follow the instructions in the section titled "Using the Base Module approach".

+

Importing a new term is split into two sub-phases:

+
    +
  1. Declaring the terms to be imported
  2. +
  3. Refreshing imports dynamically
  4. +
+

Declaring terms to be imported

+

There are three ways to declare terms that are to be imported from an external ontology. Choose the appropriate one for your particular scenario (all three can be used in parallel if need be):

+
    +
  1. Protégé-based declaration
  2. +
  3. Using term files
  4. +
  5. Using the custom import template
  6. +
+

Protégé-based declaration

+

This workflow is to be avoided, but may be appropriate if the editor does not have access to the ODK docker container. +This approach also applies to ontologies that use the base module import approach.

+
    +
  1. Open your ontology (edit file) in Protégé (5.5+).
  2. +
  3. Select 'owl:Thing'
  4. +
  5. Add a new class as usual.
  6. +
  7. Paste the full iri in the 'Name:' field, for example, http://purl.obolibrary.org/obo/CHEBI_50906.
  8. +
  9. Click 'OK'
  10. +
+

Adding Classes

+

Now you can use this term, for example, to construct logical definitions. The next time the imports are refreshed (see how to refresh here), the metadata (labels, definitions, etc.) for this term is imported from the respective external source ontology and becomes visible in your ontology.

+

Using term files

+

Every import has, by default a term file associated with it, which can be found in the imports directory. For example, if you have a GO import in src/ontology/go_import.owl, you will also have an associated term file src/ontology/go_terms.txt. You can add terms in there simply as a list:

+
GO:0008150
+GO:0008151
+
+

Now you can run the refresh imports workflow and the two terms will be imported.

+

Using the custom import template

+

This workflow is appropriate if:

+
    +
  1. You prefer to manage all your imported terms in a single file (rather than multiple files like in the "Using term files" workflow above).
  2. +
  3. You wish to augment your imported ontologies with additional information. This requires a cautionary discussion.
  4. +
+

To enable this workflow, you add the following to your ODK config file (src/ontology/upheno-odk.yaml), and update the repository:

+
use_custom_import_module: TRUE
+
+

Now you can manage your imported terms directly in the custom external terms template, which is located at src/templates/external_import.owl. Note that this file is a ROBOT template, and can, in principle, be extended to include any axioms you like. Before extending the template, however, read the following carefully.

+

The main purpose of the custom import template is to enable the management of all terms to be imported in a centralised place. To enable that, you do not have to do anything other than maintaining the template. So if you, say, currently import APOLLO_SV:00000480, and you wish to import APOLLO_SV:00000532, you simply add a row like this:

+
ID  Entity Type
+ID  TYPE
+APOLLO_SV:00000480  owl:Class
+APOLLO_SV:00000532  owl:Class
+
+

When the imports are refreshed see imports refresh workflow, the term(s) will simply be imported from the configured ontologies.

+

Now, if you wish to extend the Makefile (which is beyond these instructions) and add, say, synonyms to the imported terms, you can do that, but you need to (a) preserve the ID and ENTITY columns and (b) ensure that the ROBOT template remains valid (see here).

+

WARNING. Note that doing this is a widespread antipattern (see related issue). You should not change the axioms of terms that do not belong into your ontology unless necessary - such changes should always be pushed into the ontology where they belong. However, since people are doing it, whether the OBO Foundry likes it or not, at least using the custom imports module as described here localises the changes to a single simple template and ensures that none of the annotations added this way are merged into the base file.

+

Refresh imports

+

If you want to refresh the import yourself (this may be necessary to pass the travis tests), and you have the ODK installed, you can do the following (using go as an example):

+

First, you navigate in your terminal to the ontology directory (underneath src in your upheno root directory).

+
cd src/ontology
+
+

Then, you regenerate the import that will now include any new terms you have added. Note: You must have docker installed.

+
sh run.sh make PAT=false imports/go_import.owl -B
+
+

Since ODK 1.2.27, it is also possible to simply run the following, which is the same as the above:

+
sh run.sh make refresh-go
+
+

Note that in case you changed the defaults, you need to add IMP=true and/or MIR=true to the command below:

+
sh run.sh make IMP=true MIR=true PAT=false imports/go_import.owl -B
+
+

If you wish to skip refreshing the mirror, i.e. skip downloading the latest version of the source ontology for your import (e.g. go.owl for your go import) you can set MIR=false instead, which will do the exact same thing as the above, but is easier to remember:

+
sh run.sh make IMP=true MIR=false PAT=false imports/go_import.owl -B
+
+

Using the Base Module approach

+

Since ODK 1.2.31, we support an entirely new approach to generate modules: Using base files. +The idea is to only import axioms that actually belong to the source ontology. +A base file is a subset of the ontology that only contains those axioms that nominally +belong there. In other words, the base file does not contain any axioms that belong +to another ontology. An example would be this:

+

Imagine this being the full Uberon ontology:

+
Axiom 1: BFO:123 SubClassOf BFO:124
+Axiom 2: UBERON:123 SubClassOf BFO:123
+Axiom 3: UBERON:124 SubClassOf UBERON:123
+
+

The base file is the set of all axioms that are about UBERON terms:

+
Axiom 2: UBERON:123 SubClassOf BFO:123
+Axiom 3: UBERON:124 SubClassOf UBERON:123
+
+

I.e.

+
Axiom 1: BFO:123 SubClassOf BFO:124
+
+

Gets removed.

+

The base file pipeline is a bit more complex than the normal pipelines, because +of the logical interactions between the imported ontologies. This is solved by first +merging all mirrors into one huge file and then extracting one mega module from it.

+

Example: Let's say we are importing terms from Uberon, GO and RO in our ontologies. +When we use the base pipelines, we

+

1) First obtain the base (usually by simply downloading it, but there is also an option now to create it with ROBOT) +2) We merge all base files into one big pile +3) Then we extract a single module imports/merged_import.owl

+

The first implementation of this pipeline is PATO, see https://github.com/pato-ontology/pato/blob/master/src/ontology/pato-odk.yaml.

+

To check if your ontology uses this method, check src/ontology/upheno-odk.yaml to see if use_base_merging: TRUE is declared under import_group
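For example, the relevant part of the config might look like this (a sketch with illustrative import ids, not the actual uPheno configuration):

import_group:
  use_base_merging: TRUE
  products:
    - id: go
    - id: uberon
    - id: ro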

+

If your ontology uses Base Module approach, please use the following steps:

+

First, add the term to be imported to the term file associated with it (see above "Using term files" section if this is not clear to you)

+

Next, you navigate in your terminal to the ontology directory (underneath src in your upheno root directory).

+
cd src/ontology
+
+

Then refresh imports by running

+
sh run.sh make imports/merged_import.owl
+
+

Note: if your mirrors are already up-to-date, you can run sh run.sh make no-mirror-refresh-merged instead, which skips re-downloading the mirrors

+

This requires quite a bit of memory on your local machine, so if you encounter an error, it might be a lack of memory on your computer. A solution would be to create a ticket in an issue tracker requesting for the term to be imported, and one of the local devs should pick this up and run the import for you.

+

Lastly, restart Protégé, and the term should be imported and ready to be used.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/components/index.html b/odk-workflows/components/index.html new file mode 100644 index 00000000..682ef480 --- /dev/null +++ b/odk-workflows/components/index.html @@ -0,0 +1,1419 @@ + + + + + + + + + + + + + + + + + + + + + + Overview of components - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Adding components to an ODK repo

+

For details on what components are, please see component section of repository file structure document.

+

To add custom components to an ODK repo, please follow the following steps:

+

1) Locate your ODK yaml file (src/ontology/upheno-odk.yaml) and open it with your favourite text editor. +2) Check whether there is already a components section in the yaml file; if not, add it accordingly, including the name of your component:

+
components:
+  products:
+    - filename: your-component-name.owl
+
+

3) Add the component to your catalog file (src/ontology/catalog-v001.xml)

+
  <uri name="http://purl.obolibrary.org/obo/upheno/components/your-component-name.owl" uri="components/your-component-name.owl"/>
+
+

4) Add the component to the edit file (src/ontology/upheno-edit.obo) +for .obo formats:

+
import: http://purl.obolibrary.org/obo/upheno/components/your-component-name.owl
+
+

for .owl formats:

+
Import(<http://purl.obolibrary.org/obo/upheno/components/your-component-name.owl>)
+
+

5) Refresh your repo by running sh run.sh make update_repo - this should create a new file in src/ontology/components. +6) In your custom makefile (src/ontology/upheno.Makefile), add a goal for your new component. In this example, the goal is built from a ROBOT template.

+
$(COMPONENTSDIR)/your-component-name.owl: $(SRC) ../templates/your-component-template.tsv 
+    $(ROBOT) template --template ../templates/your-component-template.tsv \
+  annotate --ontology-iri $(ONTBASE)/$@ --output $(COMPONENTSDIR)/your-component-name.owl
+
+

(If using a ROBOT template, do not forget to add your template tsv in src/templates/)
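A minimal sketch of such a template TSV (columns are tab-separated; the term ID and label are hypothetical): the first row holds the column headers and the second row the ROBOT template strings.

ID	LABEL	TYPE	definition
ID	LABEL	TYPE	A IAO:0000115
UPHENO:9990001	example component term	owl:Class	An illustrative class generated from the template.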

+

7) Make the file by running sh run.sh make components/your-component-name.owl

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/odk-workflows/index.html b/odk-workflows/index.html new file mode 100644 index 00000000..b7b3858d --- /dev/null +++ b/odk-workflows/index.html @@ -0,0 +1,1402 @@ + + + + + + + + + + + + + + + + + + + + + + Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/organization/meetings/index.html b/organization/meetings/index.html new file mode 100644 index 00000000..c819abf7 --- /dev/null +++ b/organization/meetings/index.html @@ -0,0 +1,1497 @@ + + + + + + + + + + + + + + + + + + + + + + Meetings - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

The Unified Phenotype Ontology (uPheno) meeting series

+

The uPheno editors call is held every second Thursday (bi-weekly) on Zoom, provided by members of the Monarch Initiative and co-organised by members of the Alliance of Genome Resources. If you wish to join the meeting, you can open an issue on https://github.com/obophenotype/upheno/issues with the request to be added, or send an email to phenotype-ontologies-editors@googlegroups.com.

+

The meeting coordinator (MC) is the person charged with organising the meeting. The current MC is Ray, @rays22.

+

Meeting preparation

+
    +
  • The MC prepares the agenda in advance: everyone on the call is very busy and our time is precious.
  • +
  • Every agenda item has an associated ticket on GitHub, and a clear set of action items should be added in GitHub Tasklist syntax to the first comment on the issue (see the example after this list).
  • +
  • If there are issues for any subtasks (e.g. PATO or Uberon edits), the list should be edited to link these.
  • +
  • Any items that do not have a sub-issue but do involve changes to patterns should be edited to link to the implementing PR.
  • +
  • It does not matter who wrote the first issue comment, the uPheno team can simply add a tasklist underneath the original comment and refine it over time.
  • +
  • Tag all issues which need discussion with "upheno call"
  • +
  • It must be clear from the task list what the uPheno team should be doing during the call (discuss, decide, review). For example, one item on the task list may read: "uPheno team to decide on appropriate label for template".
  • +
  • Conversely, no issue should be added to the agenda that does not have a clear set of action items associated with it that should be addressed during the call. These actions may include making and documenting modelling decisions.
  • +
  • Go through up to 10 issues on the uPheno issue tracker before each meeting to determine how to progress on them, and add action items. Only if they need to be discussed, add the "upheno call" label.
  • +
+
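For illustration, a tasklist in the first issue comment could look like this (the items are hypothetical):

- [ ] uPheno team to decide on an appropriate label for the template (discuss on call)
- [ ] Link the implementing PR once the pattern is drafted
- [ ] Update the pattern documentation once the decision is recorded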

Meeting

+
    +
  • Every meeting should start with a quick (max 5 min, ideally 3 min) overview of all the goals and how they progressed. The MC should mention all blockers and goals, even the ones we did not make any progress on, to keep the focus on the priorities:
  • +
  • uPheno releases
  • +
  • uPheno documentation
  • +
  • Pattern creation
  • +
  • Patternisation: The process of ensuring that phenotype ontologies are using uPheno conformant templates to define their phenotypes.
  • +
  • Harmonisation: The process of ensuring that phenotype patterns are applied consistently across ontologies.
  • +
  • For new pattern discussions:
  • +
  • Every new pattern proposal should come with a new GitHub issue, appropriately tagged.
  • +
  • The issue text should detail the use cases for the pattern well, and these use cases should also be documented in the "description" part of the DOSDP YAML file. Use cases should include expected classifications and why we want them (and potentially classifications to avoid). For example, axis-specific dimension traits should classify under more abstractly defined dimension traits, which in turn should classify under Morphology. Add some examples of contexts where grouping along these classifications is useful.
  • +
  • Agenda items may include discussion and decisions about more general modelling issues that affect more than one pattern, but these should also be documented as tickets as described above.
  • +
+

After the meeting

+
    +
  • After every meeting, update all issues discussed on GitHub and, in particular, clarify the remaining action items.
  • +
  • Ensure that the highest priority issues are discussed first.
  • +
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/organization/outreach/index.html b/organization/outreach/index.html new file mode 100644 index 00000000..42d5f4b3 --- /dev/null +++ b/organization/outreach/index.html @@ -0,0 +1,1516 @@ + + + + + + + + + + + + + + + + + + + + + + Outreach - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

The Outreach Programme of the Unified Phenotype Ontology (uPheno) development team

+

Outreach-calls

+

The uPheno team organises an outreach call every four weeks to listen to external stakeholders describing their needs for cross-species phenotype integration.

+

Schedule

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Date | Lesson | Notes | Recordings
2024/04/05 | TBD | TBD | 
2024/03/08 | Computational identification of disease models through cross-species phenotype comparison | Diego A. Pava, Pilar Cacheiro, Damian Smedley (IMPC) | Recording
2024/02/09 | Use cases for uPheno in the Alliance of Genome Resources and MGI | Sue Bello (Alliance of Genome Resources, MGI) | Recording
+

Possible topics

+
    +
  • Cross-species inference in Variant and Gene Prioritisation algorithms (Exomiser).
  • +
  • Cross-species comparison of phenotypic profiles (Monarch Initiative Knowledge Graph)
  • +
  • Cross-species data in biomedical knowledge graphs (Kids First)
  • +
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/components/dpo/index.html b/reference/components/dpo/index.html new file mode 100644 index 00000000..f99a0ee8 --- /dev/null +++ b/reference/components/dpo/index.html @@ -0,0 +1,1478 @@ + + + + + + + + + + + + + + + + + + + + + + Drosophila Phenotype Ontology - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Drosophila Phenotype Ontology

+ +
    +
  1. summary Drosophila Phenotype Ontology
  2. +
+

* The Drosophila phenotype ontology. Osumi-Sutherland et al, J Biomed Sem.

+

The DPO is formally a subset of FBcv, made available from +http://purl.obolibrary.org/obo/fbcv/dpo.owl

+

Phenotypes in FlyBase may either be assigned to FBcv (dpo) classes, or +they may have a phenotype_manifest_in link to FBbt (anatomy).

+

For integration we generate the following ontologies:

+

* http://purl.obolibrary.org/obo/upheno/imports/fbbt_phenotype.owl
* http://purl.obolibrary.org/obo/upheno/imports/uberon_phenotype.owl
* http://purl.obolibrary.org/obo/upheno/imports/go_phenotype.owl
* http://purl.obolibrary.org/obo/upheno/imports/cl_phenotype.owl

+

(see Makefile)

+

This includes a phenotype class for every anatomy class - the IRI is +suffixed with "PHENOTYPE". Using these ontologies, Uberon and CL +phenotypes make the groupings.

+

We include

+

*http://purl.obolibrary.org/obo/upheno/dpo/dpo-importer.owl

+

Which imports dpo plus auto-generated fbbt phenotypes.

+

The dpo-importer is included in the [MetazoanImporter]

+

Additional Notes

+

We create a local copy of fbbt that has "Drosophila " prefixed to all +labels. This gives us a hierarchy:

+

* eye phenotype (defined using Uberon)
* compound eye phenotype (defined using Uberon)
* drosophila eye phenotype (defined using FBbt)

+

TODO

+

* http://code.google.com/p/cell-ontology/issues/detail?id=115 : ensure all CL to FBbt equiv axioms are present (we have good coverage for Uberon)

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/components/fypo/index.html b/reference/components/fypo/index.html new file mode 100644 index 00000000..5505f807 --- /dev/null +++ b/reference/components/fypo/index.html @@ -0,0 +1,1390 @@ + + + + + + + + + + + + + + + + + + + + + + Fission Yeast Phenotype Ontology - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/components/hp/index.html b/reference/components/hp/index.html new file mode 100644 index 00000000..6138cbd2 --- /dev/null +++ b/reference/components/hp/index.html @@ -0,0 +1,1416 @@ + + + + + + + + + + + + + + + + + + + + + + Human Phenotype Ontology - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +
    +
  1. summary Human Phenotype Ontology
  2. +
  3. labels Featured
  4. +
+

Links

+

* http://www.human-phenotype-ontology.org/
* Köhler S, Doelken SC, Mungall CJ, Bauer S, Firth HV, Bailleul-Forestier I, Black GC, Brown DL, Brudno M, Campbell J, FitzPatrick DR, Eppig JT, Jackson AP, Freson K, Girdea M, Helbig I, Hurst JA, Jähn J, Jackson LG, Kelly AM, Ledbetter DH, Mansour S, Martin CL, Moss C, Mumford A, Ouwehand WH, Park SM, Riggs ER, Scott RH, Sisodiya S, Van Vooren S, Wapner RJ, Wilkie AO, Wright CF, Vulto-van Silfhout AT, de Leeuw N, de Vries BB, Washington NL, Smith CL, Westerfield M, Schofield P, Ruef BJ, Gkoutos GV, Haendel M, Smedley D, Lewis SE, Robinson PN. The Human Phenotype Ontology project: linking molecular biology and disease through phenotype data. Nucleic Acids Res. 2014 Jan;42(Database issue):D966-74 [pubmed]

+

* HPO browser
* HP in OntoBee
* HP in OLSVis

+

OWL Axiomatization

+

The OWL axioms for HP are in the src/ontology/hp directory on this site.

+

The structure is analogous to that of the [MP].

+

Status

+

The OWL axiomatization is updated frequently to stay in sync with changes in the HP.

+

Editing the axioms

+

The edit file is currently:

+

*http://purl.obolibrary.org/obo/hp/hp-equivalence-axioms-subq-ubr.owl

+

Edit this file in Protégé.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/components/index.html b/reference/components/index.html new file mode 100644 index 00000000..4a64c3f8 --- /dev/null +++ b/reference/components/index.html @@ -0,0 +1,1384 @@ + + + + + + + + + + + + + + + + + + + + + + Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/components/mp/index.html b/reference/components/mp/index.html new file mode 100644 index 00000000..e4960b74 --- /dev/null +++ b/reference/components/mp/index.html @@ -0,0 +1,1430 @@ + + + + + + + + + + + + + + + + + + + + + + Mammalian Phenotype Ontology - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +
    +
Summary: Mouse Phenotype Ontology
Labels: Featured
+

Links

+

* The Mammalian Phenotype Ontology: enabling robust annotation and comparative analysis. Smith CL, Eppig JT
* MP browser at MGI
* MP in OntoBee
* MP in OLSVis

+

OWL Axiomatization

+

The OWL axioms for MP are in the src/ontology/mp directory on this site.

+

* http://purl.obolibrary.org/obo/mp.owl - direct conversion of MGI-supplied obo file
* http://purl.obolibrary.org/obo/mp/mp-importer.owl - imports additional axioms, including the following:
    * http://purl.obolibrary.org/obo/mp.owl
    * http://purl.obolibrary.org/obo/upheno/imports/chebi_import.owl
    * http://purl.obolibrary.org/obo/upheno/imports/uberon_import.owl
    * http://purl.obolibrary.org/obo/upheno/imports/pato_import.owl
    * http://purl.obolibrary.org/obo/upheno/imports/go_import.owl
    * http://purl.obolibrary.org/obo/upheno/imports/mpath_import.owl
    * http://purl.obolibrary.org/obo/mp/mp-equivalence-axioms-subq-ubr.owl

+

Status

+

The OWL axiomatization is updated frequently to stay in sync with changes in the MP.

+

Editing the axioms

+

The edit file is currently:

+

*http://purl.obolibrary.org/obo/mp/mp-equivalence-axioms-edit.owl

+

Edit this file in Protégé.

+

The file mp-equivalence-axioms.obo is DEPRECATED!

+

TermGenie

+

* http://mp.termgenie.org/
* http://mp.termgenie.org/TermGenieFreeForm

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/components/wbphenotype/index.html b/reference/components/wbphenotype/index.html new file mode 100644 index 00000000..2a1498f1 --- /dev/null +++ b/reference/components/wbphenotype/index.html @@ -0,0 +1,1417 @@ + + + + + + + + + + + + + + + + + + + + + + C. elegans Phenotype Ontology - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +
    +
Summary: Worm Phenotype Ontology
Labels: Featured
+

Links

+

* Schindelman, Gary, et al. Worm Phenotype Ontology: integrating phenotype data within and beyond the C. elegans community. BMC Bioinformatics 12.1 (2011): 32.
* WBPhenotype in OntoBee
* WBPhenotype in OLSVis

+

OWL Axiomatization

+

The OWL axioms for WBPhenotype are in the src/ontology/wbphenotype directory on this site.

+

* http://purl.obolibrary.org/obo/wbphenotype.owl - direct conversion of WormBase-supplied obo file
* http://purl.obolibrary.org/obo/wbphenotype/wbphenotype-importer.owl - imports additional axioms.

+

The structure roughly follows that of the [MP]. The worm anatomy is used.

+

Editing the axioms

+

Currently the source is wbphenotype/wbphenotype-equivalence-axioms.obo; the OWL is generated from it. We are considering switching this around, so that the OWL is edited directly, using Protégé.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/components/zp/index.html b/reference/components/zp/index.html new file mode 100644 index 00000000..5a8c4649 --- /dev/null +++ b/reference/components/zp/index.html @@ -0,0 +1,1425 @@ + + + + + + + + + + + + + + + + + + + + + + Zebrafish Phenotype Ontology - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Introduction

+

This page describes the generation of the Zebrafish Phenotype Ontology (ZP).

+

Details

+

The ZP differs considerably from [HP], [MP] and others. ZFIN does not annotate with a pre-composed phenotype ontology - all annotations compose phenotypes on-the-fly using a combination of PATO, ZFA, GO and other ontologies.

+

We use these combinations to construct ZP on the fly, by naming each distinct combination, assigning it an ID, and placing it in the hierarchy.
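A minimal sketch of the idea in plain Python (not the actual Java implementation linked below): each distinct EQ combination seen in ZFIN annotations is assigned a stable ZP-style identifier the first time it is encountered and reused thereafter. The class name, ID format, starting number and example ontology IDs are all illustrative assumptions.

```python
# Sketch: assign a stable ZP-style ID to each distinct EQ combination.
# Registry layout, ID format and example IDs are illustrative assumptions;
# the real pipeline is the Java implementation referenced below.

class ZpIdRegistry:
    def __init__(self, start: int = 1):
        self._ids = {}      # EQ combination (tuple) -> assigned ID
        self._next = start

    def id_for(self, eq: tuple) -> str:
        """Return the ZP-style ID for an EQ combination, minting one if new."""
        if eq not in self._ids:
            self._ids[eq] = f"ZP:{self._next:07d}"
            self._next += 1
        return self._ids[eq]

registry = ZpIdRegistry()
# An EQ combination (entity, quality); the IDs below are placeholders.
eq = ("ZFA:0000000", "PATO:0000000")
print(registry.id_for(eq))  # ZP:0000001 (minted on first encounter)
print(registry.id_for(eq))  # same ID is returned on the second encounter
```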

+

The process is described here:

+ +

The OWL formalism for ZFIN annotations is described here:

+ +

The java implementation is here:

+
    +
* https://github.com/sba1/bio-ontology-zp
+

OWL Axiomatization

+

The OWL axioms for ZP are in zp.owl, which is built on our Hudson server.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/core_concepts/index.html b/reference/core_concepts/index.html new file mode 100644 index 00000000..d82c630d --- /dev/null +++ b/reference/core_concepts/index.html @@ -0,0 +1,1760 @@ + + + + + + + + + + + + + + + + + + + + + + Core concepts - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Core concepts

+ +

Traits and phenotypes - the Conceptual model

+

Overview

+ + +

Table of contents

+ +

+

General characteristics

+

"Characteristics" or "qualities" refer to an inherent or distinguishing characteristic or attribute of something or someone. +It represents a feature that defines the nature of an object, organism, or entity and can be used to describe, compare, and categorize different things. +Characteristics can be either qualitative (such as color, texture, or taste) or quantitative (such as height, weight, or age).

+

The Phenotype And Trait Ontology (PATO) is the reference ontology for general characteristics in the OBO world.

+

Some of the most widely used characteristics can be seen in the following table:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Quality | Description | Example |
|---|---|---|
| Length (PATO:0000122) | A 1-D extent quality which is equal to the distance between two points. | |
| Mass (PATO:0000128) | A physical quality that inheres in a bearer by virtue of the proportion of the bearer's amount of matter. | |
| Amount (PATO:0000070) | The number of entities of a type that are part of the whole organism. | |
| Morphology (PATO:0000051) | A quality of a single physical entity inhering in the bearer by virtue of the bearer's size or shape or structure. | |
+

Note from the authors: the descriptions above have been taken from PATO, but they are not very user-friendly.

+

+

Biological Trait/Characteristics/Attribute

+

Characteristics such as the ones above can be used to describe a variety of entities: biological, environmental and social. We are specifically concerned with biological traits, i.e. characteristics of a biological entity, such as an organ (the heart), a process (cell division), or a chemical entity (lysine) in the blood.

+

The Ontology of Biological Attributes (OBA) is the reference ontology for biological characteristics in the OBO world. There are a few other ontologies that describe biological traits, such as the Vertebrate Phenotype Ontology and the Ascomycete Phenotype Ontology (APO), but these are more species-specific and, more importantly, are not integrated into the wider EQ modelling framework.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Property | Example term | Definition |
|---|---|---|
| Length | OBA:VT0002544 | The length of a digit. |
| Mass | OBA:VT0001259 | The mass of a multicellular organism. |
| Level | OBA:2020005 | The amount of lysine in blood. |
| Morphology | OBA:VT0005406 | The size of a heart. |
+

+

Bearer of Biological Characteristics

+

In biological contexts, the term "bearer" refers to the entity that possesses or carries a particular characteristic or quality. +The bearer can be any biological entity, such as an organism, an organ, a cell, or even a molecular structure, that exhibits a specific trait or feature. +Some examples:

+
    +
1. Organism as a Bearer
    * Example: A specific tree (such as an oak tree) is the bearer of the characteristic 'height'.
    * Explanation: The tree as an organism carries or has the property of height, making it the bearer of this characteristic.
2. Organ as a Bearer
    * Example: The heart of a mammal can be the bearer of the characteristic 'heart size'.
    * Explanation: Here, the heart is the organ that possesses the 'heart size' characteristic. The characteristic ('heart size') is a quality of the heart itself.
3. Cell as a Bearer
    * Example: A red blood cell is the bearer of the characteristic 'cell diameter'.
    * Explanation: The diameter is a property of the individual cell. Thus, each red blood cell is the bearer of its diameter measurement.
4. Molecular Structure as a Bearer
    * Example: A DNA molecule can be the bearer of the characteristic 'sequence length'.
    * Explanation: The length of the DNA sequence is a property of the DNA molecule itself, making the molecule the bearer of this characteristic.
5. Genetic Trait as a Bearer
    * Example: A fruit fly (Drosophila melanogaster) can be the bearer of a genetic trait like eye color.
    * Explanation: The organism (fruit fly) carries the genetic information that determines eye color, making it the bearer of this specific trait.
+

In each example, the "bearer" is the entity that has, carries, or exhibits a particular biological characteristic. This concept is fundamental in biology and bioinformatics for linking specific traits, qualities, or features to the entities that possess them, thereby enabling a clearer understanding and categorization of biological diversity and functions.

+

+

Phenotypic change

+

A phenotypic change refers to some deviation from reference morphology, physiology, or behavior. This is the most widely used, and most complicated, category of phenotype terms for data specialists to understand.

+

Conceptually, a phenotypic abnormality comprises:

+
    +
* a biological attribute (which includes a biological bearer)
* a "change" modifier
* (optionally) a directional modifier (increased / decreased)
* a comparator
+

Biological attributes such as blood lysine amount (OBA:2020005) have been discussed earlier in this document. The most widely used change modifier in practice is abnormal (PATO:0000460). This modifier signifies that the phenotypic change term describes a deviation that is abnormal, such as "Hyperlysinemia" (HP:0002161), which describes an increased concentration of lysine in the blood. Other modifiers include normal (PATO:0000461), which describes a change within the normal range (sometimes interpreted as "no change"). A directional modifier is one like increased (PATO:0040043) or decreased (PATO:0040042); in practice, most of our "characteristic" terms have specialised directional variants, such as decreased amount (PATO:0001997), which can be used to describe phenotypes.

Comparators are the most confusing aspect of phenotypic change. The first question someone has to ask when they see a concept describing a change, like increased blood lysine levels, is "compared to what?". Depending on biological context, the assumed comparators vary widely. For example, in clinical phenotyping, it is mostly assumed that a phenotypic feature corresponds to a deviation from the normal range, see HPO docs.
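As a worked illustration, a phenotypic change term such as "Hyperlysinemia" can be decomposed along the components listed above. This is a minimal sketch in plain Python using only the identifiers already cited in this section; the dictionary layout is not a uPheno data format, just an illustration.

```python
# Illustration only: decomposing a phenotypic change term into the
# components described above. The dict layout is not a uPheno format.

hyperlysinemia = {
    "term": "Hyperlysinemia (HP:0002161)",
    "biological_attribute": "blood lysine amount (OBA:2020005)",  # includes the bearer (blood)
    "change_modifier": "abnormal (PATO:0000460)",
    "directional_modifier": "increased (PATO:0040043)",
    "comparator": "normal reference range (implicit in clinical phenotyping)",
}

for component, value in hyperlysinemia.items():
    print(f"{component}: {value}")
```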

+
    +
* Nature of "comparators" in the notion of a phenotypic abnormality.
* In database curation you are effectively de-contextualising the phenotype term, which means you lose the original comparator.
* normal changed wildtype comparator
+

The Unified Phenotype Ontology (uPheno) is the reference ontology for biological abnormalities in the OBO world. There are many species-specific phenotype ontologies in the OBO world, such as the Mammalian Phenotype Ontology (MP), the Human Phenotype Ontology (HPO) and the Drosophila Phenotype Ontology (DPO), see here.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Property | Example term | Definition |
|---|---|---|
| Length | UPHENO:0072215 | Increased length of the digit. |
| Mass | UPHENO:0054299 | Decreased multicellular organism mass. |
| Level | UPHENO:0034327 | Decreased level of lysine in blood. |
| Morphology | UPHENO:0001471 | Increased size of the heart. |
+

+ +

+

Disease

+

+

Measurements

+

In biological data curation, it’s essential to differentiate between measurements and traits. Measurements, such as “blood glucose amount,” are quantitative indicators, providing numerical values. In contrast, traits, like “Hyperglycemia,” encompass both qualitative and quantitative characteristics, representing broader phenotypic states. This difference is crucial in ontology modeling, where measurements are directly linked to specific values, while traits reflect more comprehensive biological attributes. For example, “body temperature” is a measurement, whereas “Fever” represents a trait associated with elevated temperatures. Understanding this contrast is fundamental for accurate data representation and interpretation, ensuring nuanced understanding of biological entities and phenotypic variability.
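A minimal sketch of this distinction in plain Python (field names and values are illustrative, not a curation schema): a measurement record carries a numeric value and a unit, while a trait annotation records a phenotypic state without one.

```python
# Illustration of the measurement vs. trait distinction described above.
# Field names and values are illustrative assumptions, not a curation schema.

measurement = {
    "attribute": "body temperature",   # the measured attribute
    "value": 39.5,                     # a numeric value...
    "unit": "degrees Celsius",         # ...with a unit
}

trait_annotation = {
    "term": "Fever",   # a qualitative phenotypic state
    "value": None,     # no numeric value is asserted for the trait itself
}
```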

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/eq/index.html b/reference/eq/index.html new file mode 100644 index 00000000..2223f9e1 --- /dev/null +++ b/reference/eq/index.html @@ -0,0 +1,1384 @@ + + + + + + + + + + + + + + + + + + + + + + Overview of EQ modelling - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/imports/go/index.html b/reference/imports/go/index.html new file mode 100644 index 00000000..159ded71 --- /dev/null +++ b/reference/imports/go/index.html @@ -0,0 +1,1384 @@ + + + + + + + + + + + + + + + + + + + + + + Gene Ontology - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/imports/index.html b/reference/imports/index.html new file mode 100644 index 00000000..69e19170 --- /dev/null +++ b/reference/imports/index.html @@ -0,0 +1,1418 @@ + + + + + + + + + + + + + + + + + + + + + + Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +
    +
Labels: Featured

Imported ontologies
+

Introduction

+

Imports directory:

+

*http://purl.obolibrary.org/obo/upheno/imports/

+

Currently the imports includes:

+

* imports/chebi_import.owl
* imports/doid_import.owl
* imports/go_import.owl
* imports/mpath_import.owl
* imports/pato_import.owl
* imports/pr_import.owl
* imports/uberon_import.owl
* imports/wbbt_import.owl

+

Anatomy

+

To avoid multiple duplicate classes for heart, lung, skin etc., we map all classes to [Uberon] where this is applicable. For more divergent species such as fly and C. elegans we use the appropriate species-specific ontology.

+

Currently there are a small number of highly specific classes in FMA that are being used and have no corresponding class in Uberon.

+

Methods

+

We use the OWLAPI SyntacticLocalityModularityExtractor, via [OWLTools]. See the http://purl.obolibrary.org/obo/upheno/Makefile for details.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/imports/pato/index.html b/reference/imports/pato/index.html new file mode 100644 index 00000000..a2bdc498 --- /dev/null +++ b/reference/imports/pato/index.html @@ -0,0 +1,1400 @@ + + + + + + + + + + + + + + + + + + + + + + PATO - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +
    +
Summary: PATO
+

Introduction

+

PATO is an ontology of phenotypic qualities. We use PATO to compose phenotypic descriptions. See [OWLAxiomatization]

+

Details

+

See https://code.google.com/p/pato/

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/imports/uberon/index.html b/reference/imports/uberon/index.html new file mode 100644 index 00000000..81938312 --- /dev/null +++ b/reference/imports/uberon/index.html @@ -0,0 +1,1384 @@ + + + + + + + + + + + + + + + + + + + + + + Uberon - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/mappings/index.html b/reference/mappings/index.html new file mode 100644 index 00000000..a877eda3 --- /dev/null +++ b/reference/mappings/index.html @@ -0,0 +1,1384 @@ + + + + + + + + + + + + + + + + + + + + + + Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/mappings/mp_hp/index.html b/reference/mappings/mp_hp/index.html new file mode 100644 index 00000000..e9e953be --- /dev/null +++ b/reference/mappings/mp_hp/index.html @@ -0,0 +1,1384 @@ + + + + + + + + + + + + + + + + + + + + + + MP-HP - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/modelling/abnormal/index.html b/reference/modelling/abnormal/index.html new file mode 100644 index 00000000..bdd3dad4 --- /dev/null +++ b/reference/modelling/abnormal/index.html @@ -0,0 +1,1420 @@ + + + + + + + + + + + + + + + + + + + + + + Abnormal phenotypes - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +
    +
Summary: How inference of abnormality works
+

Introduction

+

The current design patterns are such that the abnormal qualifier is only added when the quality class in the definition is neutral.

+

However, we still need to be able to infer

+

* Hypoplasia of right ventricle SubClassOf Abnormality of right ventricle

+

Because the latter class definition includes qualifier some abnormal, the SubClassOf axiom will not be entailed unless the qualifier is explicitly stated or inferred.

+

Details

+

We achieve this by including an axiom for PATO such that 'decreased size' and similar qualities are inferred to be qualifier some abnormal.

+

We do this with an axiom in imports/extra.owl:

+

* 'deviation(from normal)' SubClassOf qualifier some abnormal

+

Anything under 'increased', 'decreased' etc. in PATO is pre-reasoned in PATO to be classified here.
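A toy sketch of why the extra axiom is enough, in plain Python (the hierarchy shown is illustrative and the real entailment is of course computed by an OWL reasoner over the actual axioms): because the quality used in a definition, e.g. 'hypoplastic', is a descendant of 'deviation (from normal)', it picks up 'qualifier some abnormal', and the defined phenotype is then subsumed by the corresponding "Abnormality of X" class.

```python
# Toy illustration of the inference strategy described above.
# An OWL reasoner does the real work; this just shows that walking up the
# (illustrative) PATO hierarchy from the quality reaches
# 'deviation (from normal)', which, via the extra axiom, entails
# 'qualifier some abnormal'.

pato_parents = {
    "hypoplastic": "decreased size",
    "decreased size": "deviation (from normal)",
    "deviation (from normal)": "quality",
}

def entails_abnormal_qualifier(quality: str) -> bool:
    """True if the quality is a (reflexive) descendant of 'deviation (from normal)'."""
    while quality is not None:
        if quality == "deviation (from normal)":
            return True
        quality = pato_parents.get(quality)
    return False

# 'Hypoplasia of right ventricle' is defined using the quality 'hypoplastic',
# so it picks up 'qualifier some abnormal' and is classified under
# 'Abnormality of right ventricle'.
print(entails_abnormal_qualifier("hypoplastic"))  # True
```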

+

See the following explanation:

+

http://phenotype-ontologies.googlecode.com/svn/trunk/doc/images/has-qualifier-inference.png

+

Limitations

+

For this strategy to work, the PATO classes themselves must be classified under 'deviation (from normal)'. This may not always be the case.

+

Notes

+

Do not be distracted by the fact that the has-qualifier relation is named has-component at the moment.

+

https://code.google.com/p/phenotype-ontologies/issues/detail?id=45

+


+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/modelling/absence/index.html b/reference/modelling/absence/index.html new file mode 100644 index 00000000..406377b1 --- /dev/null +++ b/reference/modelling/absence/index.html @@ -0,0 +1,1473 @@ + + + + + + + + + + + + + + + + + + + + + + Absence modelling - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +
    +
Summary: Discussion of issues pertaining to modeling of absence in phenotype ontologies
+

Introduction

+

Much has been written on the subject of representing absence. Before diving into the logical issues it is worth examining patterns in existing phenotype ontologies to understand what user expectations may typically be for absence.

+

Background

+

* Absence_Phenotypes_in_OWL (Phenoscape Wiki)
* (outdated) material on the old PATO wiki.

+

Details

+

Strict logical absence vs absence of some

+

It is not uncommon to see patterns such as

+

From a strict logical perspective, this is inverted. "absent incisors" surely means "absence of all incisors", or put another way "the animal has no incisors". Yet it would be possible to have an animal with *absent* lower incisors and *present* upper incisors, yielding what seems a contradiction (because the subClass axiom would say this partial-incisor animal lacked all incisors).

+

If the ontology were in fact truly modeling "absence of *all* S" then it would lead to a curious ontology structure, with the typical tree structure of the anatomy ontology representing S inverted into a polyhierarchical fan in the absent-S ontology.

+

From this it can be cautiously inferred that the intent of the phenotype ontology curator and user is in fact to model "absence of *some* S" rather than "absence of *all* S". This is not necessarily a universal rule, and the intent may vary depending on whether we are talking about a serially repeated structure or one that typically occurs in isolation. The intent may also be to communicate that a *significant number* of S is missing.

+

Absence as a type of morphology

+

It is also not uncommon to see patterns such as:

+

Again, from a strict logical perspective this is false. If the spleen is absent then what does the "morphology" of the parent refer to?

+

However, this inference is clearly a desirable one from the point of view of the phenotype ontology editors and users, as it is common in ontologies for a variety of structures. For example:

+

And:

+

These patterns can be formally defended on developmental biology grounds. "absence" here is _not_ equivalent to logical absence. It refers specifically to developmental absence.

+

Furthermore, strict logical absence leads to undesirable inferences. It would be odd to include a nematode worm as having the phenotype "spleen absent", because worms have not evolved spleens. But the logical description of not having a spleen as part fits a worm.

+

Similarly, if the strict cardinality interpretation were intended, we would expect to see:

+

i.e. if you're missing your entire hindlegs, you're *necessarily* missing your femurs. But it must be emphasized that this is *not* how phenotype ontologies are classified. This goes for a wide range of structures and other relationship types. In MP, "absent limb buds" are *not* classified under "absent limbs", even though it is impossible for a mammal to have limbs without having had limb buds.

+

Absence as part of a size-morphology spectrum

+

The existing treatment of absence can be formally defended morphologically by conceiving of a morphological value space, with "large" at one end and "small" at the other. As we get continuously smaller, there may come an arbitrary point whereby we say "surely this is no longer a limb" (and of course, we are not talking about a pure geometrical size transformation here - as a limb reaches extreme edges of a size range various other morphological changes necessarily happen). But this cutoff is arguably arbitrary, and the resulting discontinuity causes problems. It is simpler to treat absence as being one end of a size scale.

+

Summary

+

This is barely touching the subject, and is intended to illustrate that things may be more subtle than naively treating words like "absent" as precisely equivalent to cardinality=0. An understanding of the medical, developmental and evolutionary contexts is absolutely required, together with an understanding of the entailments of different logical formulations.

+

Even though existing phenotype ontologies may not be conceived of formally, it is implicit that they do not model absence as being equivalent to cardinality=0 / not(has_part), because the structure of these ontologies would look radically different.

+

TODO

+

Link to Jim Balhoff's PhenoDay paper and discussion

+

Here's the link: http://phenoday2014.bio-lark.org/pdf/11.pdf

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/patterns/index.html b/reference/patterns/index.html new file mode 100644 index 00000000..c618b167 --- /dev/null +++ b/reference/patterns/index.html @@ -0,0 +1,1382 @@ + + + + + + + + + + + + + + + + + + + + Design Patterns Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/phenotype-ontology-alignment/index.html b/reference/phenotype-ontology-alignment/index.html new file mode 100644 index 00000000..d752c321 --- /dev/null +++ b/reference/phenotype-ontology-alignment/index.html @@ -0,0 +1,1452 @@ + + + + + + + + + + + + + + + + + + + + + + Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Aligning species-specific phenotype ontologies

+

Phenotype ontologies use different reference ontologies for their EQs. Everything in uPheno is integrated towards a common set of reference ontologies, in particular Uberon and CL. In order to integrate ontologies built on species-specific anatomy ontologies, we employ the following workflow for phenotype ontologies:

+
    +
  1. Create a base-plus module from the ontology
  2. +
  3. Rename all Uberon-aligned entities using ROBOT rename. This replaces basically species specific anatomy references with Uberon anatomy references
  4. +
  5. Delete all species specific references from uPheno (FBBT, XAO, ZFA, etc). This also deletes all EQs which have non-Uberon references.
  6. +
  7. For all remaining species-specific anatomy terms, we retain only the link to the nearest Uberon term.
  8. +
+
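A minimal sketch of the renaming idea behind step 2, in plain Python (the actual pipeline applies ROBOT rename to the OWL file; the mapping entries below are placeholders, not real alignments): every species-specific anatomy IRI that has an Uberon alignment is rewritten to the corresponding Uberon IRI, and unaligned IRIs are left untouched.

```python
# Sketch of the IRI renaming in step 2 above. The real workflow uses
# ROBOT rename over the ontology; the mapping below is a placeholder.

species_to_uberon = {
    # species-specific anatomy IRI -> aligned Uberon IRI (placeholder values)
    "http://purl.obolibrary.org/obo/ZFA_0000000": "http://purl.obolibrary.org/obo/UBERON_0000000",
}

def rename_iri(iri: str) -> str:
    """Replace a species-specific anatomy IRI with its Uberon equivalent, if aligned."""
    return species_to_uberon.get(iri, iri)

print(rename_iri("http://purl.obolibrary.org/obo/ZFA_0000000"))   # renamed
print(rename_iri("http://purl.obolibrary.org/obo/ZFA_0000001"))   # unchanged (no alignment)
```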

Rules for phenotype ontologies to be integrated

+
    +
  1. Every phenotype ontology must export a base module at the proper PURL location
  2. +
  3. Every phenotype ontology must export a upheno export module at the proper PURL location
  4. +
+

When two classes are merged in uPheno based on a cross-species mapping, we assert the most general common ancestor as parent.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/qc/index.html b/reference/qc/index.html new file mode 100644 index 00000000..6ab3c309 --- /dev/null +++ b/reference/qc/index.html @@ -0,0 +1,1393 @@ + + + + + + + + + + + + + + + + + + + + + + Overview - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

uPheno Quality Control

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/qc/odk_checks/index.html b/reference/qc/odk_checks/index.html new file mode 100644 index 00000000..c9a6590c --- /dev/null +++ b/reference/qc/odk_checks/index.html @@ -0,0 +1,1393 @@ + + + + + + + + + + + + + + + + + + + + + + Standard OBO checks - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

ODK: Basic Quality Control

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/reconciliation_effort/index.html b/reference/reconciliation_effort/index.html new file mode 100644 index 00000000..2b6cc448 --- /dev/null +++ b/reference/reconciliation_effort/index.html @@ -0,0 +1,1382 @@ + + + + + + + + + + + + + + + + + + + + + + The Phenotype Reconciliation Effort - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/reference/use_cases/index.html b/reference/use_cases/index.html new file mode 100644 index 00000000..6f923b7e --- /dev/null +++ b/reference/use_cases/index.html @@ -0,0 +1,1440 @@ + + + + + + + + + + + + + + + + + + + + + + Use Cases - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/search/lunr.js b/search/lunr.js new file mode 100644 index 00000000..aca0a167 --- /dev/null +++ b/search/lunr.js @@ -0,0 +1,3475 @@ +/** + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9 + * Copyright (C) 2020 Oliver Nightingale + * @license MIT + */ + +;(function(){ + +/** + * A convenience function for configuring and constructing + * a new lunr Index. + * + * A lunr.Builder instance is created and the pipeline setup + * with a trimmer, stop word filter and stemmer. + * + * This builder object is yielded to the configuration function + * that is passed as a parameter, allowing the list of fields + * and other builder parameters to be customised. + * + * All documents _must_ be added within the passed config function. + * + * @example + * var idx = lunr(function () { + * this.field('title') + * this.field('body') + * this.ref('id') + * + * documents.forEach(function (doc) { + * this.add(doc) + * }, this) + * }) + * + * @see {@link lunr.Builder} + * @see {@link lunr.Pipeline} + * @see {@link lunr.trimmer} + * @see {@link lunr.stopWordFilter} + * @see {@link lunr.stemmer} + * @namespace {function} lunr + */ +var lunr = function (config) { + var builder = new lunr.Builder + + builder.pipeline.add( + lunr.trimmer, + lunr.stopWordFilter, + lunr.stemmer + ) + + builder.searchPipeline.add( + lunr.stemmer + ) + + config.call(builder, builder) + return builder.build() +} + +lunr.version = "2.3.9" +/*! + * lunr.utils + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A namespace containing utils for the rest of the lunr library + * @namespace lunr.utils + */ +lunr.utils = {} + +/** + * Print a warning message to the console. + * + * @param {String} message The message to be printed. + * @memberOf lunr.utils + * @function + */ +lunr.utils.warn = (function (global) { + /* eslint-disable no-console */ + return function (message) { + if (global.console && console.warn) { + console.warn(message) + } + } + /* eslint-enable no-console */ +})(this) + +/** + * Convert an object to a string. + * + * In the case of `null` and `undefined` the function returns + * the empty string, in all other cases the result of calling + * `toString` on the passed object is returned. + * + * @param {Any} obj The object to convert to a string. + * @return {String} string representation of the passed object. + * @memberOf lunr.utils + */ +lunr.utils.asString = function (obj) { + if (obj === void 0 || obj === null) { + return "" + } else { + return obj.toString() + } +} + +/** + * Clones an object. + * + * Will create a copy of an existing object such that any mutations + * on the copy cannot affect the original. + * + * Only shallow objects are supported, passing a nested object to this + * function will cause a TypeError. + * + * Objects with primitives, and arrays of primitives are supported. + * + * @param {Object} obj The object to clone. + * @return {Object} a clone of the passed object. + * @throws {TypeError} when a nested object is passed. 
+ * @memberOf Utils + */ +lunr.utils.clone = function (obj) { + if (obj === null || obj === undefined) { + return obj + } + + var clone = Object.create(null), + keys = Object.keys(obj) + + for (var i = 0; i < keys.length; i++) { + var key = keys[i], + val = obj[key] + + if (Array.isArray(val)) { + clone[key] = val.slice() + continue + } + + if (typeof val === 'string' || + typeof val === 'number' || + typeof val === 'boolean') { + clone[key] = val + continue + } + + throw new TypeError("clone is not deep and does not support nested objects") + } + + return clone +} +lunr.FieldRef = function (docRef, fieldName, stringValue) { + this.docRef = docRef + this.fieldName = fieldName + this._stringValue = stringValue +} + +lunr.FieldRef.joiner = "/" + +lunr.FieldRef.fromString = function (s) { + var n = s.indexOf(lunr.FieldRef.joiner) + + if (n === -1) { + throw "malformed field ref string" + } + + var fieldRef = s.slice(0, n), + docRef = s.slice(n + 1) + + return new lunr.FieldRef (docRef, fieldRef, s) +} + +lunr.FieldRef.prototype.toString = function () { + if (this._stringValue == undefined) { + this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef + } + + return this._stringValue +} +/*! + * lunr.Set + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A lunr set. + * + * @constructor + */ +lunr.Set = function (elements) { + this.elements = Object.create(null) + + if (elements) { + this.length = elements.length + + for (var i = 0; i < this.length; i++) { + this.elements[elements[i]] = true + } + } else { + this.length = 0 + } +} + +/** + * A complete set that contains all elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.complete = { + intersect: function (other) { + return other + }, + + union: function () { + return this + }, + + contains: function () { + return true + } +} + +/** + * An empty set that contains no elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.empty = { + intersect: function () { + return this + }, + + union: function (other) { + return other + }, + + contains: function () { + return false + } +} + +/** + * Returns true if this set contains the specified object. + * + * @param {object} object - Object whose presence in this set is to be tested. + * @returns {boolean} - True if this set contains the specified object. + */ +lunr.Set.prototype.contains = function (object) { + return !!this.elements[object] +} + +/** + * Returns a new set containing only the elements that are present in both + * this set and the specified set. + * + * @param {lunr.Set} other - set to intersect with this set. + * @returns {lunr.Set} a new set that is the intersection of this and the specified set. + */ + +lunr.Set.prototype.intersect = function (other) { + var a, b, elements, intersection = [] + + if (other === lunr.Set.complete) { + return this + } + + if (other === lunr.Set.empty) { + return other + } + + if (this.length < other.length) { + a = this + b = other + } else { + a = other + b = this + } + + elements = Object.keys(a.elements) + + for (var i = 0; i < elements.length; i++) { + var element = elements[i] + if (element in b.elements) { + intersection.push(element) + } + } + + return new lunr.Set (intersection) +} + +/** + * Returns a new set combining the elements of this and the specified set. + * + * @param {lunr.Set} other - set to union with this set. + * @return {lunr.Set} a new set that is the union of this and the specified set. 
+ */ + +lunr.Set.prototype.union = function (other) { + if (other === lunr.Set.complete) { + return lunr.Set.complete + } + + if (other === lunr.Set.empty) { + return this + } + + return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements))) +} +/** + * A function to calculate the inverse document frequency for + * a posting. This is shared between the builder and the index + * + * @private + * @param {object} posting - The posting for a given term + * @param {number} documentCount - The total number of documents. + */ +lunr.idf = function (posting, documentCount) { + var documentsWithTerm = 0 + + for (var fieldName in posting) { + if (fieldName == '_index') continue // Ignore the term index, its not a field + documentsWithTerm += Object.keys(posting[fieldName]).length + } + + var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5) + + return Math.log(1 + Math.abs(x)) +} + +/** + * A token wraps a string representation of a token + * as it is passed through the text processing pipeline. + * + * @constructor + * @param {string} [str=''] - The string token being wrapped. + * @param {object} [metadata={}] - Metadata associated with this token. + */ +lunr.Token = function (str, metadata) { + this.str = str || "" + this.metadata = metadata || {} +} + +/** + * Returns the token string that is being wrapped by this object. + * + * @returns {string} + */ +lunr.Token.prototype.toString = function () { + return this.str +} + +/** + * A token update function is used when updating or optionally + * when cloning a token. + * + * @callback lunr.Token~updateFunction + * @param {string} str - The string representation of the token. + * @param {Object} metadata - All metadata associated with this token. + */ + +/** + * Applies the given function to the wrapped string token. + * + * @example + * token.update(function (str, metadata) { + * return str.toUpperCase() + * }) + * + * @param {lunr.Token~updateFunction} fn - A function to apply to the token string. + * @returns {lunr.Token} + */ +lunr.Token.prototype.update = function (fn) { + this.str = fn(this.str, this.metadata) + return this +} + +/** + * Creates a clone of this token. Optionally a function can be + * applied to the cloned token. + * + * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token. + * @returns {lunr.Token} + */ +lunr.Token.prototype.clone = function (fn) { + fn = fn || function (s) { return s } + return new lunr.Token (fn(this.str, this.metadata), this.metadata) +} +/*! + * lunr.tokenizer + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A function for splitting a string into tokens ready to be inserted into + * the search index. Uses `lunr.tokenizer.separator` to split strings, change + * the value of this property to change how strings are split into tokens. + * + * This tokenizer will convert its parameter to a string by calling `toString` and + * then will split this string on the character in `lunr.tokenizer.separator`. + * Arrays will have their elements converted to strings and wrapped in a lunr.Token. + * + * Optional metadata can be passed to the tokenizer, this metadata will be cloned and + * added as metadata to every token that is created from the object to be tokenized. 
+ * + * @static + * @param {?(string|object|object[])} obj - The object to convert into tokens + * @param {?object} metadata - Optional metadata to associate with every token + * @returns {lunr.Token[]} + * @see {@link lunr.Pipeline} + */ +lunr.tokenizer = function (obj, metadata) { + if (obj == null || obj == undefined) { + return [] + } + + if (Array.isArray(obj)) { + return obj.map(function (t) { + return new lunr.Token( + lunr.utils.asString(t).toLowerCase(), + lunr.utils.clone(metadata) + ) + }) + } + + var str = obj.toString().toLowerCase(), + len = str.length, + tokens = [] + + for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) { + var char = str.charAt(sliceEnd), + sliceLength = sliceEnd - sliceStart + + if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) { + + if (sliceLength > 0) { + var tokenMetadata = lunr.utils.clone(metadata) || {} + tokenMetadata["position"] = [sliceStart, sliceLength] + tokenMetadata["index"] = tokens.length + + tokens.push( + new lunr.Token ( + str.slice(sliceStart, sliceEnd), + tokenMetadata + ) + ) + } + + sliceStart = sliceEnd + 1 + } + + } + + return tokens +} + +/** + * The separator used to split a string into tokens. Override this property to change the behaviour of + * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens. + * + * @static + * @see lunr.tokenizer + */ +lunr.tokenizer.separator = /[\s\-]+/ +/*! + * lunr.Pipeline + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Pipelines maintain an ordered list of functions to be applied to all + * tokens in documents entering the search index and queries being ran against + * the index. + * + * An instance of lunr.Index created with the lunr shortcut will contain a + * pipeline with a stop word filter and an English language stemmer. Extra + * functions can be added before or after either of these functions or these + * default functions can be removed. + * + * When run the pipeline will call each function in turn, passing a token, the + * index of that token in the original list of all tokens and finally a list of + * all the original tokens. + * + * The output of functions in the pipeline will be passed to the next function + * in the pipeline. To exclude a token from entering the index the function + * should return undefined, the rest of the pipeline will not be called with + * this token. + * + * For serialisation of pipelines to work, all functions used in an instance of + * a pipeline should be registered with lunr.Pipeline. Registered functions can + * then be loaded. If trying to load a serialised pipeline that uses functions + * that are not registered an error will be thrown. + * + * If not planning on serialising the pipeline then registering pipeline functions + * is not necessary. + * + * @constructor + */ +lunr.Pipeline = function () { + this._stack = [] +} + +lunr.Pipeline.registeredFunctions = Object.create(null) + +/** + * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token + * string as well as all known metadata. A pipeline function can mutate the token string + * or mutate (or add) metadata for a given token. + * + * A pipeline function can indicate that the passed token should be discarded by returning + * null, undefined or an empty string. This token will not be passed to any downstream pipeline + * functions and will not be added to the index. + * + * Multiple tokens can be returned by returning an array of tokens. 
Each token will be passed + * to any downstream pipeline functions and all will returned tokens will be added to the index. + * + * Any number of pipeline functions may be chained together using a lunr.Pipeline. + * + * @interface lunr.PipelineFunction + * @param {lunr.Token} token - A token from the document being processed. + * @param {number} i - The index of this token in the complete list of tokens for this document/field. + * @param {lunr.Token[]} tokens - All tokens for this document/field. + * @returns {(?lunr.Token|lunr.Token[])} + */ + +/** + * Register a function with the pipeline. + * + * Functions that are used in the pipeline should be registered if the pipeline + * needs to be serialised, or a serialised pipeline needs to be loaded. + * + * Registering a function does not add it to a pipeline, functions must still be + * added to instances of the pipeline for them to be used when running a pipeline. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @param {String} label - The label to register this function with + */ +lunr.Pipeline.registerFunction = function (fn, label) { + if (label in this.registeredFunctions) { + lunr.utils.warn('Overwriting existing registered function: ' + label) + } + + fn.label = label + lunr.Pipeline.registeredFunctions[fn.label] = fn +} + +/** + * Warns if the function is not registered as a Pipeline function. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @private + */ +lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) { + var isRegistered = fn.label && (fn.label in this.registeredFunctions) + + if (!isRegistered) { + lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn) + } +} + +/** + * Loads a previously serialised pipeline. + * + * All functions to be loaded must already be registered with lunr.Pipeline. + * If any function from the serialised data has not been registered then an + * error will be thrown. + * + * @param {Object} serialised - The serialised pipeline to load. + * @returns {lunr.Pipeline} + */ +lunr.Pipeline.load = function (serialised) { + var pipeline = new lunr.Pipeline + + serialised.forEach(function (fnName) { + var fn = lunr.Pipeline.registeredFunctions[fnName] + + if (fn) { + pipeline.add(fn) + } else { + throw new Error('Cannot load unregistered function: ' + fnName) + } + }) + + return pipeline +} + +/** + * Adds new functions to the end of the pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline. + */ +lunr.Pipeline.prototype.add = function () { + var fns = Array.prototype.slice.call(arguments) + + fns.forEach(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + this._stack.push(fn) + }, this) +} + +/** + * Adds a single function after a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. 
+ */ +lunr.Pipeline.prototype.after = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + pos = pos + 1 + this._stack.splice(pos, 0, newFn) +} + +/** + * Adds a single function before a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. + */ +lunr.Pipeline.prototype.before = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + this._stack.splice(pos, 0, newFn) +} + +/** + * Removes a function from the pipeline. + * + * @param {lunr.PipelineFunction} fn The function to remove from the pipeline. + */ +lunr.Pipeline.prototype.remove = function (fn) { + var pos = this._stack.indexOf(fn) + if (pos == -1) { + return + } + + this._stack.splice(pos, 1) +} + +/** + * Runs the current list of functions that make up the pipeline against the + * passed tokens. + * + * @param {Array} tokens The tokens to run through the pipeline. + * @returns {Array} + */ +lunr.Pipeline.prototype.run = function (tokens) { + var stackLength = this._stack.length + + for (var i = 0; i < stackLength; i++) { + var fn = this._stack[i] + var memo = [] + + for (var j = 0; j < tokens.length; j++) { + var result = fn(tokens[j], j, tokens) + + if (result === null || result === void 0 || result === '') continue + + if (Array.isArray(result)) { + for (var k = 0; k < result.length; k++) { + memo.push(result[k]) + } + } else { + memo.push(result) + } + } + + tokens = memo + } + + return tokens +} + +/** + * Convenience method for passing a string through a pipeline and getting + * strings out. This method takes care of wrapping the passed string in a + * token and mapping the resulting tokens back to strings. + * + * @param {string} str - The string to pass through the pipeline. + * @param {?object} metadata - Optional metadata to associate with the token + * passed to the pipeline. + * @returns {string[]} + */ +lunr.Pipeline.prototype.runString = function (str, metadata) { + var token = new lunr.Token (str, metadata) + + return this.run([token]).map(function (t) { + return t.toString() + }) +} + +/** + * Resets the pipeline by removing any existing processors. + * + */ +lunr.Pipeline.prototype.reset = function () { + this._stack = [] +} + +/** + * Returns a representation of the pipeline ready for serialisation. + * + * Logs a warning if the function has not been registered. + * + * @returns {Array} + */ +lunr.Pipeline.prototype.toJSON = function () { + return this._stack.map(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + + return fn.label + }) +} +/*! + * lunr.Vector + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A vector is used to construct the vector space of documents and queries. These + * vectors support operations to determine the similarity between two documents or + * a document and a query. + * + * Normally no parameters are required for initializing a vector, but in the case of + * loading a previously dumped vector the raw elements can be provided to the constructor. 
+ * + * For performance reasons vectors are implemented with a flat array, where an elements + * index is immediately followed by its value. E.g. [index, value, index, value]. This + * allows the underlying array to be as sparse as possible and still offer decent + * performance when being used for vector calculations. + * + * @constructor + * @param {Number[]} [elements] - The flat list of element index and element value pairs. + */ +lunr.Vector = function (elements) { + this._magnitude = 0 + this.elements = elements || [] +} + + +/** + * Calculates the position within the vector to insert a given index. + * + * This is used internally by insert and upsert. If there are duplicate indexes then + * the position is returned as if the value for that index were to be updated, but it + * is the callers responsibility to check whether there is a duplicate at that index + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @returns {Number} + */ +lunr.Vector.prototype.positionForIndex = function (index) { + // For an empty vector the tuple can be inserted at the beginning + if (this.elements.length == 0) { + return 0 + } + + var start = 0, + end = this.elements.length / 2, + sliceLength = end - start, + pivotPoint = Math.floor(sliceLength / 2), + pivotIndex = this.elements[pivotPoint * 2] + + while (sliceLength > 1) { + if (pivotIndex < index) { + start = pivotPoint + } + + if (pivotIndex > index) { + end = pivotPoint + } + + if (pivotIndex == index) { + break + } + + sliceLength = end - start + pivotPoint = start + Math.floor(sliceLength / 2) + pivotIndex = this.elements[pivotPoint * 2] + } + + if (pivotIndex == index) { + return pivotPoint * 2 + } + + if (pivotIndex > index) { + return pivotPoint * 2 + } + + if (pivotIndex < index) { + return (pivotPoint + 1) * 2 + } +} + +/** + * Inserts an element at an index within the vector. + * + * Does not allow duplicates, will throw an error if there is already an entry + * for this index. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. + */ +lunr.Vector.prototype.insert = function (insertIdx, val) { + this.upsert(insertIdx, val, function () { + throw "duplicate index" + }) +} + +/** + * Inserts or updates an existing index within the vector. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. + * @param {function} fn - A function that is called for updates, the existing value and the + * requested value are passed as arguments + */ +lunr.Vector.prototype.upsert = function (insertIdx, val, fn) { + this._magnitude = 0 + var position = this.positionForIndex(insertIdx) + + if (this.elements[position] == insertIdx) { + this.elements[position + 1] = fn(this.elements[position + 1], val) + } else { + this.elements.splice(position, 0, insertIdx, val) + } +} + +/** + * Calculates the magnitude of this vector. + * + * @returns {Number} + */ +lunr.Vector.prototype.magnitude = function () { + if (this._magnitude) return this._magnitude + + var sumOfSquares = 0, + elementsLength = this.elements.length + + for (var i = 1; i < elementsLength; i += 2) { + var val = this.elements[i] + sumOfSquares += val * val + } + + return this._magnitude = Math.sqrt(sumOfSquares) +} + +/** + * Calculates the dot product of this vector and another vector. + * + * @param {lunr.Vector} otherVector - The vector to compute the dot product with. 
+ * @returns {Number} + */ +lunr.Vector.prototype.dot = function (otherVector) { + var dotProduct = 0, + a = this.elements, b = otherVector.elements, + aLen = a.length, bLen = b.length, + aVal = 0, bVal = 0, + i = 0, j = 0 + + while (i < aLen && j < bLen) { + aVal = a[i], bVal = b[j] + if (aVal < bVal) { + i += 2 + } else if (aVal > bVal) { + j += 2 + } else if (aVal == bVal) { + dotProduct += a[i + 1] * b[j + 1] + i += 2 + j += 2 + } + } + + return dotProduct +} + +/** + * Calculates the similarity between this vector and another vector. + * + * @param {lunr.Vector} otherVector - The other vector to calculate the + * similarity with. + * @returns {Number} + */ +lunr.Vector.prototype.similarity = function (otherVector) { + return this.dot(otherVector) / this.magnitude() || 0 +} + +/** + * Converts the vector to an array of the elements within the vector. + * + * @returns {Number[]} + */ +lunr.Vector.prototype.toArray = function () { + var output = new Array (this.elements.length / 2) + + for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) { + output[j] = this.elements[i] + } + + return output +} + +/** + * A JSON serializable representation of the vector. + * + * @returns {Number[]} + */ +lunr.Vector.prototype.toJSON = function () { + return this.elements +} +/* eslint-disable */ +/*! + * lunr.stemmer + * Copyright (C) 2020 Oliver Nightingale + * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt + */ + +/** + * lunr.stemmer is an english language stemmer, this is a JavaScript + * implementation of the PorterStemmer taken from http://tartarus.org/~martin + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token - The string to stem + * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + * @function + */ +lunr.stemmer = (function(){ + var step2list = { + "ational" : "ate", + "tional" : "tion", + "enci" : "ence", + "anci" : "ance", + "izer" : "ize", + "bli" : "ble", + "alli" : "al", + "entli" : "ent", + "eli" : "e", + "ousli" : "ous", + "ization" : "ize", + "ation" : "ate", + "ator" : "ate", + "alism" : "al", + "iveness" : "ive", + "fulness" : "ful", + "ousness" : "ous", + "aliti" : "al", + "iviti" : "ive", + "biliti" : "ble", + "logi" : "log" + }, + + step3list = { + "icate" : "ic", + "ative" : "", + "alize" : "al", + "iciti" : "ic", + "ical" : "ic", + "ful" : "", + "ness" : "" + }, + + c = "[^aeiou]", // consonant + v = "[aeiouy]", // vowel + C = c + "[^aeiouy]*", // consonant sequence + V = v + "[aeiou]*", // vowel sequence + + mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0 + meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1 + mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1 + s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + var re_mgr0 = new RegExp(mgr0); + var re_mgr1 = new RegExp(mgr1); + var re_meq1 = new RegExp(meq1); + var re_s_v = new RegExp(s_v); + + var re_1a = /^(.+?)(ss|i)es$/; + var re2_1a = /^(.+?)([^s])s$/; + var re_1b = /^(.+?)eed$/; + var re2_1b = /^(.+?)(ed|ing)$/; + var re_1b_2 = /.$/; + var re2_1b_2 = /(at|bl|iz)$/; + var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$"); + var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var re_1c = /^(.+?[^aeiou])y$/; + var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + + var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + + var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + var re2_4 = /^(.+?)(s|t)(ion)$/; + + var re_5 = /^(.+?)e$/; + var re_5_1 = /ll$/; + var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var porterStemmer = function porterStemmer(w) { + var stem, + suffix, + firstch, + re, + re2, + re3, + re4; + + if (w.length < 3) { return w; } + + firstch = w.substr(0,1); + if (firstch == "y") { + w = firstch.toUpperCase() + w.substr(1); + } + + // Step 1a + re = re_1a + re2 = re2_1a; + + if (re.test(w)) { w = w.replace(re,"$1$2"); } + else if (re2.test(w)) { w = w.replace(re2,"$1$2"); } + + // Step 1b + re = re_1b; + re2 = re2_1b; + if (re.test(w)) { + var fp = re.exec(w); + re = re_mgr0; + if (re.test(fp[1])) { + re = re_1b_2; + w = w.replace(re,""); + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = re_s_v; + if (re2.test(stem)) { + w = stem; + re2 = re2_1b_2; + re3 = re3_1b_2; + re4 = re4_1b_2; + if (re2.test(w)) { w = w + "e"; } + else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); } + else if (re4.test(w)) { w = w + "e"; } + } + } + + // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say) + re = re_1c; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + w = stem + "i"; + } + + // Step 2 + re = re_2; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step2list[suffix]; + } + } + + // Step 3 + re = re_3; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step3list[suffix]; + } + } + + // Step 4 + re = re_4; + re2 = re2_4; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + if (re.test(stem)) { + w = stem; + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = re_mgr1; + if (re2.test(stem)) { + w = stem; + } + } + + // Step 5 + re = re_5; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + re2 = re_meq1; + re3 = re3_5; + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) { + w = stem; + } + } + + re = re_5_1; + re2 = re_mgr1; + if (re.test(w) && re2.test(w)) { + re = re_1b_2; + w = w.replace(re,""); + } + + // and turn initial Y back to y + + if (firstch == "y") { + w = firstch.toLowerCase() + w.substr(1); + } + + return w; + }; + + return function (token) { + return token.update(porterStemmer); + } +})(); + +lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') +/*! + * lunr.stopWordFilter + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.generateStopWordFilter builds a stopWordFilter function from the provided + * list of stop words. 
+ * + * The built in lunr.stopWordFilter is built using this generator and can be used + * to generate custom stopWordFilters for applications or non English languages. + * + * @function + * @param {Array} token The token to pass through the filter + * @returns {lunr.PipelineFunction} + * @see lunr.Pipeline + * @see lunr.stopWordFilter + */ +lunr.generateStopWordFilter = function (stopWords) { + var words = stopWords.reduce(function (memo, stopWord) { + memo[stopWord] = stopWord + return memo + }, {}) + + return function (token) { + if (token && words[token.toString()] !== token.toString()) return token + } +} + +/** + * lunr.stopWordFilter is an English language stop word list filter, any words + * contained in the list will not be passed through the filter. + * + * This is intended to be used in the Pipeline. If the token does not pass the + * filter then undefined will be returned. + * + * @function + * @implements {lunr.PipelineFunction} + * @params {lunr.Token} token - A token to check for being a stop word. + * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + */ +lunr.stopWordFilter = lunr.generateStopWordFilter([ + 'a', + 'able', + 'about', + 'across', + 'after', + 'all', + 'almost', + 'also', + 'am', + 'among', + 'an', + 'and', + 'any', + 'are', + 'as', + 'at', + 'be', + 'because', + 'been', + 'but', + 'by', + 'can', + 'cannot', + 'could', + 'dear', + 'did', + 'do', + 'does', + 'either', + 'else', + 'ever', + 'every', + 'for', + 'from', + 'get', + 'got', + 'had', + 'has', + 'have', + 'he', + 'her', + 'hers', + 'him', + 'his', + 'how', + 'however', + 'i', + 'if', + 'in', + 'into', + 'is', + 'it', + 'its', + 'just', + 'least', + 'let', + 'like', + 'likely', + 'may', + 'me', + 'might', + 'most', + 'must', + 'my', + 'neither', + 'no', + 'nor', + 'not', + 'of', + 'off', + 'often', + 'on', + 'only', + 'or', + 'other', + 'our', + 'own', + 'rather', + 'said', + 'say', + 'says', + 'she', + 'should', + 'since', + 'so', + 'some', + 'than', + 'that', + 'the', + 'their', + 'them', + 'then', + 'there', + 'these', + 'they', + 'this', + 'tis', + 'to', + 'too', + 'twas', + 'us', + 'wants', + 'was', + 'we', + 'were', + 'what', + 'when', + 'where', + 'which', + 'while', + 'who', + 'whom', + 'why', + 'will', + 'with', + 'would', + 'yet', + 'you', + 'your' +]) + +lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter') +/*! + * lunr.trimmer + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.trimmer is a pipeline function for trimming non word + * characters from the beginning and end of tokens before they + * enter the index. + * + * This implementation may not work correctly for non latin + * characters and should either be removed or adapted for use + * with languages with non-latin characters. + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token The token to pass through the filter + * @returns {lunr.Token} + * @see lunr.Pipeline + */ +lunr.trimmer = function (token) { + return token.update(function (s) { + return s.replace(/^\W+/, '').replace(/\W+$/, '') + }) +} + +lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer') +/*! + * lunr.TokenSet + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A token set is used to store the unique list of all tokens + * within an index. Token sets are also used to represent an + * incoming query to the index, this query token set and index + * token set are then intersected to find which tokens to look + * up in the inverted index. 
+ * + * A token set can hold multiple tokens, as in the case of the + * index token set, or it can hold a single token as in the + * case of a simple query token set. + * + * Additionally token sets are used to perform wildcard matching. + * Leading, contained and trailing wildcards are supported, and + * from this edit distance matching can also be provided. + * + * Token sets are implemented as a minimal finite state automata, + * where both common prefixes and suffixes are shared between tokens. + * This helps to reduce the space used for storing the token set. + * + * @constructor + */ +lunr.TokenSet = function () { + this.final = false + this.edges = {} + this.id = lunr.TokenSet._nextId + lunr.TokenSet._nextId += 1 +} + +/** + * Keeps track of the next, auto increment, identifier to assign + * to a new tokenSet. + * + * TokenSets require a unique identifier to be correctly minimised. + * + * @private + */ +lunr.TokenSet._nextId = 1 + +/** + * Creates a TokenSet instance from the given sorted array of words. + * + * @param {String[]} arr - A sorted array of strings to create the set from. + * @returns {lunr.TokenSet} + * @throws Will throw an error if the input array is not sorted. + */ +lunr.TokenSet.fromArray = function (arr) { + var builder = new lunr.TokenSet.Builder + + for (var i = 0, len = arr.length; i < len; i++) { + builder.insert(arr[i]) + } + + builder.finish() + return builder.root +} + +/** + * Creates a token set from a query clause. + * + * @private + * @param {Object} clause - A single clause from lunr.Query. + * @param {string} clause.term - The query clause term. + * @param {number} [clause.editDistance] - The optional edit distance for the term. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromClause = function (clause) { + if ('editDistance' in clause) { + return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance) + } else { + return lunr.TokenSet.fromString(clause.term) + } +} + +/** + * Creates a token set representing a single string with a specified + * edit distance. + * + * Insertions, deletions, substitutions and transpositions are each + * treated as an edit distance of 1. + * + * Increasing the allowed edit distance will have a dramatic impact + * on the performance of both creating and intersecting these TokenSets. + * It is advised to keep the edit distance less than 3. + * + * @param {string} str - The string to create the token set from. + * @param {number} editDistance - The allowed edit distance to match. 
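+ *
+ * The example below is an illustrative sketch: with an edit distance of 1 the
+ * resulting token set will match terms that are one insertion, deletion,
+ * substitution or transposition away from the input string.
+ *
+ * @example Sketch: fuzzy token set with an edit distance of 1 (hypothetical terms)
+ * var fuzzy = lunr.TokenSet.fromFuzzyString('help', 1)
+ * // intersected with an index token set this would match e.g. 'help',
+ * // 'helps' (insertion), 'kelp' (substitution) or 'hel' (deletion),
+ * // but not 'kelps', which is two edits away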
+ * @returns {lunr.Vector} + */ +lunr.TokenSet.fromFuzzyString = function (str, editDistance) { + var root = new lunr.TokenSet + + var stack = [{ + node: root, + editsRemaining: editDistance, + str: str + }] + + while (stack.length) { + var frame = stack.pop() + + // no edit + if (frame.str.length > 0) { + var char = frame.str.charAt(0), + noEditNode + + if (char in frame.node.edges) { + noEditNode = frame.node.edges[char] + } else { + noEditNode = new lunr.TokenSet + frame.node.edges[char] = noEditNode + } + + if (frame.str.length == 1) { + noEditNode.final = true + } + + stack.push({ + node: noEditNode, + editsRemaining: frame.editsRemaining, + str: frame.str.slice(1) + }) + } + + if (frame.editsRemaining == 0) { + continue + } + + // insertion + if ("*" in frame.node.edges) { + var insertionNode = frame.node.edges["*"] + } else { + var insertionNode = new lunr.TokenSet + frame.node.edges["*"] = insertionNode + } + + if (frame.str.length == 0) { + insertionNode.final = true + } + + stack.push({ + node: insertionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str + }) + + // deletion + // can only do a deletion if we have enough edits remaining + // and if there are characters left to delete in the string + if (frame.str.length > 1) { + stack.push({ + node: frame.node, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // deletion + // just removing the last character from the str + if (frame.str.length == 1) { + frame.node.final = true + } + + // substitution + // can only do a substitution if we have enough edits remaining + // and if there are characters left to substitute + if (frame.str.length >= 1) { + if ("*" in frame.node.edges) { + var substitutionNode = frame.node.edges["*"] + } else { + var substitutionNode = new lunr.TokenSet + frame.node.edges["*"] = substitutionNode + } + + if (frame.str.length == 1) { + substitutionNode.final = true + } + + stack.push({ + node: substitutionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // transposition + // can only do a transposition if there are edits remaining + // and there are enough characters to transpose + if (frame.str.length > 1) { + var charA = frame.str.charAt(0), + charB = frame.str.charAt(1), + transposeNode + + if (charB in frame.node.edges) { + transposeNode = frame.node.edges[charB] + } else { + transposeNode = new lunr.TokenSet + frame.node.edges[charB] = transposeNode + } + + if (frame.str.length == 1) { + transposeNode.final = true + } + + stack.push({ + node: transposeNode, + editsRemaining: frame.editsRemaining - 1, + str: charA + frame.str.slice(2) + }) + } + } + + return root +} + +/** + * Creates a TokenSet from a string. + * + * The string may contain one or more wildcard characters (*) + * that will allow wildcard matching when intersecting with + * another TokenSet. + * + * @param {string} str - The string to create a TokenSet from. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromString = function (str) { + var node = new lunr.TokenSet, + root = node + + /* + * Iterates through all characters within the passed string + * appending a node for each character. + * + * When a wildcard character is found then a self + * referencing edge is introduced to continually match + * any number of any characters. 
+ */ + for (var i = 0, len = str.length; i < len; i++) { + var char = str[i], + final = (i == len - 1) + + if (char == "*") { + node.edges[char] = node + node.final = final + + } else { + var next = new lunr.TokenSet + next.final = final + + node.edges[char] = next + node = next + } + } + + return root +} + +/** + * Converts this TokenSet into an array of strings + * contained within the TokenSet. + * + * This is not intended to be used on a TokenSet that + * contains wildcards, in these cases the results are + * undefined and are likely to cause an infinite loop. + * + * @returns {string[]} + */ +lunr.TokenSet.prototype.toArray = function () { + var words = [] + + var stack = [{ + prefix: "", + node: this + }] + + while (stack.length) { + var frame = stack.pop(), + edges = Object.keys(frame.node.edges), + len = edges.length + + if (frame.node.final) { + /* In Safari, at this point the prefix is sometimes corrupted, see: + * https://github.com/olivernn/lunr.js/issues/279 Calling any + * String.prototype method forces Safari to "cast" this string to what + * it's supposed to be, fixing the bug. */ + frame.prefix.charAt(0) + words.push(frame.prefix) + } + + for (var i = 0; i < len; i++) { + var edge = edges[i] + + stack.push({ + prefix: frame.prefix.concat(edge), + node: frame.node.edges[edge] + }) + } + } + + return words +} + +/** + * Generates a string representation of a TokenSet. + * + * This is intended to allow TokenSets to be used as keys + * in objects, largely to aid the construction and minimisation + * of a TokenSet. As such it is not designed to be a human + * friendly representation of the TokenSet. + * + * @returns {string} + */ +lunr.TokenSet.prototype.toString = function () { + // NOTE: Using Object.keys here as this.edges is very likely + // to enter 'hash-mode' with many keys being added + // + // avoiding a for-in loop here as it leads to the function + // being de-optimised (at least in V8). From some simple + // benchmarks the performance is comparable, but allowing + // V8 to optimize may mean easy performance wins in the future. + + if (this._str) { + return this._str + } + + var str = this.final ? '1' : '0', + labels = Object.keys(this.edges).sort(), + len = labels.length + + for (var i = 0; i < len; i++) { + var label = labels[i], + node = this.edges[label] + + str = str + label + node.id + } + + return str +} + +/** + * Returns a new TokenSet that is the intersection of + * this TokenSet and the passed TokenSet. + * + * This intersection will take into account any wildcards + * contained within the TokenSet. + * + * @param {lunr.TokenSet} b - An other TokenSet to intersect with. 
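+ *
+ * A minimal sketch, assuming a small sorted vocabulary, of intersecting a
+ * wildcard query token set with an index token set:
+ *
+ * @example Sketch: intersecting a wildcard token set (hypothetical vocabulary)
+ * var vocabulary = lunr.TokenSet.fromArray(['hell', 'help', 'hope'])
+ * var query = lunr.TokenSet.fromString('hel*')
+ * vocabulary.intersect(query).toArray() // => ['hell', 'help'] (order not guaranteed)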
+ * @returns {lunr.TokenSet} + */ +lunr.TokenSet.prototype.intersect = function (b) { + var output = new lunr.TokenSet, + frame = undefined + + var stack = [{ + qNode: b, + output: output, + node: this + }] + + while (stack.length) { + frame = stack.pop() + + // NOTE: As with the #toString method, we are using + // Object.keys and a for loop instead of a for-in loop + // as both of these objects enter 'hash' mode, causing + // the function to be de-optimised in V8 + var qEdges = Object.keys(frame.qNode.edges), + qLen = qEdges.length, + nEdges = Object.keys(frame.node.edges), + nLen = nEdges.length + + for (var q = 0; q < qLen; q++) { + var qEdge = qEdges[q] + + for (var n = 0; n < nLen; n++) { + var nEdge = nEdges[n] + + if (nEdge == qEdge || qEdge == '*') { + var node = frame.node.edges[nEdge], + qNode = frame.qNode.edges[qEdge], + final = node.final && qNode.final, + next = undefined + + if (nEdge in frame.output.edges) { + // an edge already exists for this character + // no need to create a new node, just set the finality + // bit unless this node is already final + next = frame.output.edges[nEdge] + next.final = next.final || final + + } else { + // no edge exists yet, must create one + // set the finality bit and insert it + // into the output + next = new lunr.TokenSet + next.final = final + frame.output.edges[nEdge] = next + } + + stack.push({ + qNode: qNode, + output: next, + node: node + }) + } + } + } + } + + return output +} +lunr.TokenSet.Builder = function () { + this.previousWord = "" + this.root = new lunr.TokenSet + this.uncheckedNodes = [] + this.minimizedNodes = {} +} + +lunr.TokenSet.Builder.prototype.insert = function (word) { + var node, + commonPrefix = 0 + + if (word < this.previousWord) { + throw new Error ("Out of order word insertion") + } + + for (var i = 0; i < word.length && i < this.previousWord.length; i++) { + if (word[i] != this.previousWord[i]) break + commonPrefix++ + } + + this.minimize(commonPrefix) + + if (this.uncheckedNodes.length == 0) { + node = this.root + } else { + node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child + } + + for (var i = commonPrefix; i < word.length; i++) { + var nextNode = new lunr.TokenSet, + char = word[i] + + node.edges[char] = nextNode + + this.uncheckedNodes.push({ + parent: node, + char: char, + child: nextNode + }) + + node = nextNode + } + + node.final = true + this.previousWord = word +} + +lunr.TokenSet.Builder.prototype.finish = function () { + this.minimize(0) +} + +lunr.TokenSet.Builder.prototype.minimize = function (downTo) { + for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) { + var node = this.uncheckedNodes[i], + childKey = node.child.toString() + + if (childKey in this.minimizedNodes) { + node.parent.edges[node.char] = this.minimizedNodes[childKey] + } else { + // Cache the key for this node since + // we know it can't change anymore + node.child._str = childKey + + this.minimizedNodes[childKey] = node.child + } + + this.uncheckedNodes.pop() + } +} +/*! + * lunr.Index + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * An index contains the built index of all documents and provides a query interface + * to the index. + * + * Usually instances of lunr.Index will not be created using this constructor, instead + * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be + * used to load previously built and serialized indexes. + * + * @constructor + * @param {Object} attrs - The attributes of the built search index. 
+ * @param {Object} attrs.invertedIndex - An index of term/field to document reference. + * @param {Object} attrs.fieldVectors - Field vectors + * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens. + * @param {string[]} attrs.fields - The names of indexed document fields. + * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms. + */ +lunr.Index = function (attrs) { + this.invertedIndex = attrs.invertedIndex + this.fieldVectors = attrs.fieldVectors + this.tokenSet = attrs.tokenSet + this.fields = attrs.fields + this.pipeline = attrs.pipeline +} + +/** + * A result contains details of a document matching a search query. + * @typedef {Object} lunr.Index~Result + * @property {string} ref - The reference of the document this result represents. + * @property {number} score - A number between 0 and 1 representing how similar this document is to the query. + * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match. + */ + +/** + * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple + * query language which itself is parsed into an instance of lunr.Query. + * + * For programmatically building queries it is advised to directly use lunr.Query, the query language + * is best used for human entered text rather than program generated text. + * + * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported + * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello' + * or 'world', though those that contain both will rank higher in the results. + * + * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can + * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding + * wildcards will increase the number of documents that will be found but can also have a negative + * impact on query performance, especially with wildcards at the beginning of a term. + * + * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term + * hello in the title field will match this query. Using a field not present in the index will lead + * to an error being thrown. + * + * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term + * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported + * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. + * Avoid large values for edit distance to improve query performance. + * + * Each term also supports a presence modifier. By default a term's presence in document is optional, however + * this can be changed to either required or prohibited. For a term's presence to be required in a document the + * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and + * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not + * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'. + * + * To escape special characters the backslash character '\' can be used, this allows searches to include + * characters that would normally be considered modifiers, e.g. 
`foo\~2` will search for a term "foo~2" instead + * of attempting to apply a boost of 2 to the search term "foo". + * + * @typedef {string} lunr.Index~QueryString + * @example Simple single term query + * hello + * @example Multiple term query + * hello world + * @example term scoped to a field + * title:hello + * @example term with a boost of 10 + * hello^10 + * @example term with an edit distance of 2 + * hello~2 + * @example terms with presence modifiers + * -foo +bar baz + */ + +/** + * Performs a search against the index using lunr query syntax. + * + * Results will be returned sorted by their score, the most relevant results + * will be returned first. For details on how the score is calculated, please see + * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}. + * + * For more programmatic querying use lunr.Index#query. + * + * @param {lunr.Index~QueryString} queryString - A string containing a lunr query. + * @throws {lunr.QueryParseError} If the passed query string cannot be parsed. + * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.search = function (queryString) { + return this.query(function (query) { + var parser = new lunr.QueryParser(queryString, query) + parser.parse() + }) +} + +/** + * A query builder callback provides a query object to be used to express + * the query to perform on the index. + * + * @callback lunr.Index~queryBuilder + * @param {lunr.Query} query - The query object to build up. + * @this lunr.Query + */ + +/** + * Performs a query against the index using the yielded lunr.Query object. + * + * If performing programmatic queries against the index, this method is preferred + * over lunr.Index#search so as to avoid the additional query parsing overhead. + * + * A query object is yielded to the supplied function which should be used to + * express the query to be run against the index. + * + * Note that although this function takes a callback parameter it is _not_ an + * asynchronous operation, the callback is just yielded a query object to be + * customized. + * + * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query. + * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.query = function (fn) { + // for each query clause + // * process terms + // * expand terms from token set + // * find matching documents and metadata + // * get document vectors + // * score documents + + var query = new lunr.Query(this.fields), + matchingFields = Object.create(null), + queryVectors = Object.create(null), + termFieldCache = Object.create(null), + requiredMatches = Object.create(null), + prohibitedMatches = Object.create(null) + + /* + * To support field level boosts a query vector is created per + * field. An empty vector is eagerly created to support negated + * queries. + */ + for (var i = 0; i < this.fields.length; i++) { + queryVectors[this.fields[i]] = new lunr.Vector + } + + fn.call(query, query) + + for (var i = 0; i < query.clauses.length; i++) { + /* + * Unless the pipeline has been disabled for this term, which is + * the case for terms with wildcards, we need to pass the clause + * term through the search pipeline. A pipeline returns an array + * of processed terms. Pipeline functions may expand the passed + * term, which means we may end up performing multiple index lookups + * for a single query term. 
+ */ + var clause = query.clauses[i], + terms = null, + clauseMatches = lunr.Set.empty + + if (clause.usePipeline) { + terms = this.pipeline.runString(clause.term, { + fields: clause.fields + }) + } else { + terms = [clause.term] + } + + for (var m = 0; m < terms.length; m++) { + var term = terms[m] + + /* + * Each term returned from the pipeline needs to use the same query + * clause object, e.g. the same boost and or edit distance. The + * simplest way to do this is to re-use the clause object but mutate + * its term property. + */ + clause.term = term + + /* + * From the term in the clause we create a token set which will then + * be used to intersect the indexes token set to get a list of terms + * to lookup in the inverted index + */ + var termTokenSet = lunr.TokenSet.fromClause(clause), + expandedTerms = this.tokenSet.intersect(termTokenSet).toArray() + + /* + * If a term marked as required does not exist in the tokenSet it is + * impossible for the search to return any matches. We set all the field + * scoped required matches set to empty and stop examining any further + * clauses. + */ + if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = lunr.Set.empty + } + + break + } + + for (var j = 0; j < expandedTerms.length; j++) { + /* + * For each term get the posting and termIndex, this is required for + * building the query vector. + */ + var expandedTerm = expandedTerms[j], + posting = this.invertedIndex[expandedTerm], + termIndex = posting._index + + for (var k = 0; k < clause.fields.length; k++) { + /* + * For each field that this query term is scoped by (by default + * all fields are in scope) we need to get all the document refs + * that have this term in that field. + * + * The posting is the entry in the invertedIndex for the matching + * term from above. + */ + var field = clause.fields[k], + fieldPosting = posting[field], + matchingDocumentRefs = Object.keys(fieldPosting), + termField = expandedTerm + "/" + field, + matchingDocumentsSet = new lunr.Set(matchingDocumentRefs) + + /* + * if the presence of this term is required ensure that the matching + * documents are added to the set of required matches for this clause. + * + */ + if (clause.presence == lunr.Query.presence.REQUIRED) { + clauseMatches = clauseMatches.union(matchingDocumentsSet) + + if (requiredMatches[field] === undefined) { + requiredMatches[field] = lunr.Set.complete + } + } + + /* + * if the presence of this term is prohibited ensure that the matching + * documents are added to the set of prohibited matches for this field, + * creating that set if it does not yet exist. + */ + if (clause.presence == lunr.Query.presence.PROHIBITED) { + if (prohibitedMatches[field] === undefined) { + prohibitedMatches[field] = lunr.Set.empty + } + + prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet) + + /* + * Prohibited matches should not be part of the query vector used for + * similarity scoring and no metadata should be extracted so we continue + * to the next field + */ + continue + } + + /* + * The query field vector is populated using the termIndex found for + * the term and a unit value with the appropriate boost applied. + * Using upsert because there could already be an entry in the vector + * for the term we are working with. In that case we just add the scores + * together. 
+ */ + queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b }) + + /** + * If we've already seen this term, field combo then we've already collected + * the matching documents and metadata, no need to go through all that again + */ + if (termFieldCache[termField]) { + continue + } + + for (var l = 0; l < matchingDocumentRefs.length; l++) { + /* + * All metadata for this term/field/document triple + * are then extracted and collected into an instance + * of lunr.MatchData ready to be returned in the query + * results + */ + var matchingDocumentRef = matchingDocumentRefs[l], + matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field), + metadata = fieldPosting[matchingDocumentRef], + fieldMatch + + if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) { + matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata) + } else { + fieldMatch.add(expandedTerm, field, metadata) + } + + } + + termFieldCache[termField] = true + } + } + } + + /** + * If the presence was required we need to update the requiredMatches field sets. + * We do this after all fields for the term have collected their matches because + * the clause terms presence is required in _any_ of the fields not _all_ of the + * fields. + */ + if (clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = requiredMatches[field].intersect(clauseMatches) + } + } + } + + /** + * Need to combine the field scoped required and prohibited + * matching documents into a global set of required and prohibited + * matches + */ + var allRequiredMatches = lunr.Set.complete, + allProhibitedMatches = lunr.Set.empty + + for (var i = 0; i < this.fields.length; i++) { + var field = this.fields[i] + + if (requiredMatches[field]) { + allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field]) + } + + if (prohibitedMatches[field]) { + allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field]) + } + } + + var matchingFieldRefs = Object.keys(matchingFields), + results = [], + matches = Object.create(null) + + /* + * If the query is negated (contains only prohibited terms) + * we need to get _all_ fieldRefs currently existing in the + * index. This is only done when we know that the query is + * entirely prohibited terms to avoid any cost of getting all + * fieldRefs unnecessarily. + * + * Additionally, blank MatchData must be created to correctly + * populate the results. + */ + if (query.isNegated()) { + matchingFieldRefs = Object.keys(this.fieldVectors) + + for (var i = 0; i < matchingFieldRefs.length; i++) { + var matchingFieldRef = matchingFieldRefs[i] + var fieldRef = lunr.FieldRef.fromString(matchingFieldRef) + matchingFields[matchingFieldRef] = new lunr.MatchData + } + } + + for (var i = 0; i < matchingFieldRefs.length; i++) { + /* + * Currently we have document fields that match the query, but we + * need to return documents. The matchData and scores are combined + * from multiple fields belonging to the same document. + * + * Scores are calculated by field, using the query vectors created + * above, and combined into a final document score using addition. 
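+ *
+ * For example (hypothetical numbers): if a document's "title" field scores
+ * 0.4 against the query and its "body" field scores 0.3, the combined
+ * document score is 0.7.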
+ */ + var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]), + docRef = fieldRef.docRef + + if (!allRequiredMatches.contains(docRef)) { + continue + } + + if (allProhibitedMatches.contains(docRef)) { + continue + } + + var fieldVector = this.fieldVectors[fieldRef], + score = queryVectors[fieldRef.fieldName].similarity(fieldVector), + docMatch + + if ((docMatch = matches[docRef]) !== undefined) { + docMatch.score += score + docMatch.matchData.combine(matchingFields[fieldRef]) + } else { + var match = { + ref: docRef, + score: score, + matchData: matchingFields[fieldRef] + } + matches[docRef] = match + results.push(match) + } + } + + /* + * Sort the results objects by score, highest first. + */ + return results.sort(function (a, b) { + return b.score - a.score + }) +} + +/** + * Prepares the index for JSON serialization. + * + * The schema for this JSON blob will be described in a + * separate JSON schema file. + * + * @returns {Object} + */ +lunr.Index.prototype.toJSON = function () { + var invertedIndex = Object.keys(this.invertedIndex) + .sort() + .map(function (term) { + return [term, this.invertedIndex[term]] + }, this) + + var fieldVectors = Object.keys(this.fieldVectors) + .map(function (ref) { + return [ref, this.fieldVectors[ref].toJSON()] + }, this) + + return { + version: lunr.version, + fields: this.fields, + fieldVectors: fieldVectors, + invertedIndex: invertedIndex, + pipeline: this.pipeline.toJSON() + } +} + +/** + * Loads a previously serialized lunr.Index + * + * @param {Object} serializedIndex - A previously serialized lunr.Index + * @returns {lunr.Index} + */ +lunr.Index.load = function (serializedIndex) { + var attrs = {}, + fieldVectors = {}, + serializedVectors = serializedIndex.fieldVectors, + invertedIndex = Object.create(null), + serializedInvertedIndex = serializedIndex.invertedIndex, + tokenSetBuilder = new lunr.TokenSet.Builder, + pipeline = lunr.Pipeline.load(serializedIndex.pipeline) + + if (serializedIndex.version != lunr.version) { + lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'") + } + + for (var i = 0; i < serializedVectors.length; i++) { + var tuple = serializedVectors[i], + ref = tuple[0], + elements = tuple[1] + + fieldVectors[ref] = new lunr.Vector(elements) + } + + for (var i = 0; i < serializedInvertedIndex.length; i++) { + var tuple = serializedInvertedIndex[i], + term = tuple[0], + posting = tuple[1] + + tokenSetBuilder.insert(term) + invertedIndex[term] = posting + } + + tokenSetBuilder.finish() + + attrs.fields = serializedIndex.fields + + attrs.fieldVectors = fieldVectors + attrs.invertedIndex = invertedIndex + attrs.tokenSet = tokenSetBuilder.root + attrs.pipeline = pipeline + + return new lunr.Index(attrs) +} +/*! + * lunr.Builder + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Builder performs indexing on a set of documents and + * returns instances of lunr.Index ready for querying. + * + * All configuration of the index is done via the builder, the + * fields to index, the document reference, the text processing + * pipeline and document scoring parameters are all set on the + * builder before indexing. + * + * @constructor + * @property {string} _ref - Internal reference to the document reference field. + * @property {string[]} _fields - Internal reference to the document fields to index. + * @property {object} invertedIndex - The inverted index maps terms to document fields. 
+ * @property {object} documentTermFrequencies - Keeps track of document term frequencies. + * @property {object} documentLengths - Keeps track of the length of documents added to the index. + * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing. + * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing. + * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index. + * @property {number} documentCount - Keeps track of the total number of documents indexed. + * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75. + * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2. + * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space. + * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index. + */ +lunr.Builder = function () { + this._ref = "id" + this._fields = Object.create(null) + this._documents = Object.create(null) + this.invertedIndex = Object.create(null) + this.fieldTermFrequencies = {} + this.fieldLengths = {} + this.tokenizer = lunr.tokenizer + this.pipeline = new lunr.Pipeline + this.searchPipeline = new lunr.Pipeline + this.documentCount = 0 + this._b = 0.75 + this._k1 = 1.2 + this.termIndex = 0 + this.metadataWhitelist = [] +} + +/** + * Sets the document field used as the document reference. Every document must have this field. + * The type of this field in the document should be a string, if it is not a string it will be + * coerced into a string by calling toString. + * + * The default ref is 'id'. + * + * The ref should _not_ be changed during indexing, it should be set before any documents are + * added to the index. Changing it during indexing can lead to inconsistent results. + * + * @param {string} ref - The name of the reference field in the document. + */ +lunr.Builder.prototype.ref = function (ref) { + this._ref = ref +} + +/** + * A function that is used to extract a field from a document. + * + * Lunr expects a field to be at the top level of a document, if however the field + * is deeply nested within a document an extractor function can be used to extract + * the right field for indexing. + * + * @callback fieldExtractor + * @param {object} doc - The document being added to the index. + * @returns {?(string|object|object[])} obj - The object that will be indexed for this field. + * @example Extracting a nested field + * function (doc) { return doc.nested.field } + */ + +/** + * Adds a field to the list of document fields that will be indexed. Every document being + * indexed should have this field. Null values for this field in indexed documents will + * not cause errors but will limit the chance of that document being retrieved by searches. + * + * All fields should be added before adding documents to the index. Adding fields after + * a document has been indexed will have no effect on already indexed documents. + * + * Fields can be boosted at build time. This allows terms within that field to have more + * importance when ranking search results. Use a field boost to specify that matches within + * one field are more important than other fields. 
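+ *
+ * A minimal sketch (field names and the document are hypothetical) of
+ * configuring a builder with a boosted field before adding documents:
+ *
+ * @example Sketch: boosting a field at build time (hypothetical documents)
+ * var builder = new lunr.Builder
+ * builder.ref('id')
+ * builder.field('title', { boost: 10 })
+ * builder.field('body')
+ * builder.add({ id: '1', title: 'hello', body: 'hello world' })
+ * var idx = builder.build()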
+ * + * @param {string} fieldName - The name of a field to index in all documents. + * @param {object} attributes - Optional attributes associated with this field. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this field. + * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document. + * @throws {RangeError} fieldName cannot contain unsupported characters '/' + */ +lunr.Builder.prototype.field = function (fieldName, attributes) { + if (/\//.test(fieldName)) { + throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'") + } + + this._fields[fieldName] = attributes || {} +} + +/** + * A parameter to tune the amount of field length normalisation that is applied when + * calculating relevance scores. A value of 0 will completely disable any normalisation + * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b + * will be clamped to the range 0 - 1. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.b = function (number) { + if (number < 0) { + this._b = 0 + } else if (number > 1) { + this._b = 1 + } else { + this._b = number + } +} + +/** + * A parameter that controls the speed at which a rise in term frequency results in term + * frequency saturation. The default value is 1.2. Setting this to a higher value will give + * slower saturation levels, a lower value will result in quicker saturation. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.k1 = function (number) { + this._k1 = number +} + +/** + * Adds a document to the index. + * + * Before adding fields to the index the index should have been fully setup, with the document + * ref and all fields to index already having been specified. + * + * The document must have a field name as specified by the ref (by default this is 'id') and + * it should have all fields defined for indexing, though null or undefined values will not + * cause errors. + * + * Entire documents can be boosted at build time. Applying a boost to a document indicates that + * this document should rank higher in search results than other documents. + * + * @param {object} doc - The document to add to the index. + * @param {object} attributes - Optional attributes associated with this document. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this document. + */ +lunr.Builder.prototype.add = function (doc, attributes) { + var docRef = doc[this._ref], + fields = Object.keys(this._fields) + + this._documents[docRef] = attributes || {} + this.documentCount += 1 + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i], + extractor = this._fields[fieldName].extractor, + field = extractor ? 
extractor(doc) : doc[fieldName], + tokens = this.tokenizer(field, { + fields: [fieldName] + }), + terms = this.pipeline.run(tokens), + fieldRef = new lunr.FieldRef (docRef, fieldName), + fieldTerms = Object.create(null) + + this.fieldTermFrequencies[fieldRef] = fieldTerms + this.fieldLengths[fieldRef] = 0 + + // store the length of this field for this document + this.fieldLengths[fieldRef] += terms.length + + // calculate term frequencies for this field + for (var j = 0; j < terms.length; j++) { + var term = terms[j] + + if (fieldTerms[term] == undefined) { + fieldTerms[term] = 0 + } + + fieldTerms[term] += 1 + + // add to inverted index + // create an initial posting if one doesn't exist + if (this.invertedIndex[term] == undefined) { + var posting = Object.create(null) + posting["_index"] = this.termIndex + this.termIndex += 1 + + for (var k = 0; k < fields.length; k++) { + posting[fields[k]] = Object.create(null) + } + + this.invertedIndex[term] = posting + } + + // add an entry for this term/fieldName/docRef to the invertedIndex + if (this.invertedIndex[term][fieldName][docRef] == undefined) { + this.invertedIndex[term][fieldName][docRef] = Object.create(null) + } + + // store all whitelisted metadata about this token in the + // inverted index + for (var l = 0; l < this.metadataWhitelist.length; l++) { + var metadataKey = this.metadataWhitelist[l], + metadata = term.metadata[metadataKey] + + if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) { + this.invertedIndex[term][fieldName][docRef][metadataKey] = [] + } + + this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata) + } + } + + } +} + +/** + * Calculates the average document length for this index + * + * @private + */ +lunr.Builder.prototype.calculateAverageFieldLengths = function () { + + var fieldRefs = Object.keys(this.fieldLengths), + numberOfFields = fieldRefs.length, + accumulator = {}, + documentsWithField = {} + + for (var i = 0; i < numberOfFields; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + field = fieldRef.fieldName + + documentsWithField[field] || (documentsWithField[field] = 0) + documentsWithField[field] += 1 + + accumulator[field] || (accumulator[field] = 0) + accumulator[field] += this.fieldLengths[fieldRef] + } + + var fields = Object.keys(this._fields) + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i] + accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName] + } + + this.averageFieldLength = accumulator +} + +/** + * Builds a vector space model of every document using lunr.Vector + * + * @private + */ +lunr.Builder.prototype.createFieldVectors = function () { + var fieldVectors = {}, + fieldRefs = Object.keys(this.fieldTermFrequencies), + fieldRefsLength = fieldRefs.length, + termIdfCache = Object.create(null) + + for (var i = 0; i < fieldRefsLength; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + fieldName = fieldRef.fieldName, + fieldLength = this.fieldLengths[fieldRef], + fieldVector = new lunr.Vector, + termFrequencies = this.fieldTermFrequencies[fieldRef], + terms = Object.keys(termFrequencies), + termsLength = terms.length + + + var fieldBoost = this._fields[fieldName].boost || 1, + docBoost = this._documents[fieldRef.docRef].boost || 1 + + for (var j = 0; j < termsLength; j++) { + var term = terms[j], + tf = termFrequencies[term], + termIndex = this.invertedIndex[term]._index, + idf, score, scoreWithPrecision + + if (termIdfCache[term] === undefined) { + idf = 
lunr.idf(this.invertedIndex[term], this.documentCount) + termIdfCache[term] = idf + } else { + idf = termIdfCache[term] + } + + score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf) + score *= fieldBoost + score *= docBoost + scoreWithPrecision = Math.round(score * 1000) / 1000 + // Converts 1.23456789 to 1.234. + // Reducing the precision so that the vectors take up less + // space when serialised. Doing it now so that they behave + // the same before and after serialisation. Also, this is + // the fastest approach to reducing a number's precision in + // JavaScript. + + fieldVector.insert(termIndex, scoreWithPrecision) + } + + fieldVectors[fieldRef] = fieldVector + } + + this.fieldVectors = fieldVectors +} + +/** + * Creates a token set of all tokens in the index using lunr.TokenSet + * + * @private + */ +lunr.Builder.prototype.createTokenSet = function () { + this.tokenSet = lunr.TokenSet.fromArray( + Object.keys(this.invertedIndex).sort() + ) +} + +/** + * Builds the index, creating an instance of lunr.Index. + * + * This completes the indexing process and should only be called + * once all documents have been added to the index. + * + * @returns {lunr.Index} + */ +lunr.Builder.prototype.build = function () { + this.calculateAverageFieldLengths() + this.createFieldVectors() + this.createTokenSet() + + return new lunr.Index({ + invertedIndex: this.invertedIndex, + fieldVectors: this.fieldVectors, + tokenSet: this.tokenSet, + fields: Object.keys(this._fields), + pipeline: this.searchPipeline + }) +} + +/** + * Applies a plugin to the index builder. + * + * A plugin is a function that is called with the index builder as its context. + * Plugins can be used to customise or extend the behaviour of the index + * in some way. A plugin is just a function, that encapsulated the custom + * behaviour that should be applied when building the index. + * + * The plugin function will be called with the index builder as its argument, additional + * arguments can also be passed when calling use. The function will be called + * with the index builder as its context. + * + * @param {Function} plugin The plugin to apply. + */ +lunr.Builder.prototype.use = function (fn) { + var args = Array.prototype.slice.call(arguments, 1) + args.unshift(this) + fn.apply(this, args) +} +/** + * Contains and collects metadata about a matching document. + * A single instance of lunr.MatchData is returned as part of every + * lunr.Index~Result. + * + * @constructor + * @param {string} term - The term this match data is associated with + * @param {string} field - The field in which the term was found + * @param {object} metadata - The metadata recorded about this term in this field + * @property {object} metadata - A cloned collection of metadata associated with this document. + * @see {@link lunr.Index~Result} + */ +lunr.MatchData = function (term, field, metadata) { + var clonedMetadata = Object.create(null), + metadataKeys = Object.keys(metadata || {}) + + // Cloning the metadata to prevent the original + // being mutated during match data combination. 
+ // Metadata is kept in an array within the inverted + // index so cloning the data can be done with + // Array#slice + for (var i = 0; i < metadataKeys.length; i++) { + var key = metadataKeys[i] + clonedMetadata[key] = metadata[key].slice() + } + + this.metadata = Object.create(null) + + if (term !== undefined) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = clonedMetadata + } +} + +/** + * An instance of lunr.MatchData will be created for every term that matches a + * document. However only one instance is required in a lunr.Index~Result. This + * method combines metadata from another instance of lunr.MatchData with this + * objects metadata. + * + * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one. + * @see {@link lunr.Index~Result} + */ +lunr.MatchData.prototype.combine = function (otherMatchData) { + var terms = Object.keys(otherMatchData.metadata) + + for (var i = 0; i < terms.length; i++) { + var term = terms[i], + fields = Object.keys(otherMatchData.metadata[term]) + + if (this.metadata[term] == undefined) { + this.metadata[term] = Object.create(null) + } + + for (var j = 0; j < fields.length; j++) { + var field = fields[j], + keys = Object.keys(otherMatchData.metadata[term][field]) + + if (this.metadata[term][field] == undefined) { + this.metadata[term][field] = Object.create(null) + } + + for (var k = 0; k < keys.length; k++) { + var key = keys[k] + + if (this.metadata[term][field][key] == undefined) { + this.metadata[term][field][key] = otherMatchData.metadata[term][field][key] + } else { + this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key]) + } + + } + } + } +} + +/** + * Add metadata for a term/field pair to this instance of match data. + * + * @param {string} term - The term this match data is associated with + * @param {string} field - The field in which the term was found + * @param {object} metadata - The metadata recorded about this term in this field + */ +lunr.MatchData.prototype.add = function (term, field, metadata) { + if (!(term in this.metadata)) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = metadata + return + } + + if (!(field in this.metadata[term])) { + this.metadata[term][field] = metadata + return + } + + var metadataKeys = Object.keys(metadata) + + for (var i = 0; i < metadataKeys.length; i++) { + var key = metadataKeys[i] + + if (key in this.metadata[term][field]) { + this.metadata[term][field][key] = this.metadata[term][field][key].concat(metadata[key]) + } else { + this.metadata[term][field][key] = metadata[key] + } + } +} +/** + * A lunr.Query provides a programmatic way of defining queries to be performed + * against a {@link lunr.Index}. + * + * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method + * so the query object is pre-initialized with the right index fields. + * + * @constructor + * @property {lunr.Query~Clause[]} clauses - An array of query clauses. + * @property {string[]} allFields - An array of all available fields in a lunr.Index. + */ +lunr.Query = function (allFields) { + this.clauses = [] + this.allFields = allFields +} + +/** + * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause. + * + * This allows wildcards to be added to the beginning and end of a term without having to manually do any string + * concatenation. 
+ * + * The wildcard constants can be bitwise combined to select both leading and trailing wildcards. + * + * @constant + * @default + * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour + * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists + * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with trailing wildcard + * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING }) + * @example query term with leading and trailing wildcard + * query.term('foo', { + * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING + * }) + */ + +lunr.Query.wildcard = new String ("*") +lunr.Query.wildcard.NONE = 0 +lunr.Query.wildcard.LEADING = 1 +lunr.Query.wildcard.TRAILING = 2 + +/** + * Constants for indicating what kind of presence a term must have in matching documents. + * + * @constant + * @enum {number} + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with required presence + * query.term('foo', { presence: lunr.Query.presence.REQUIRED }) + */ +lunr.Query.presence = { + /** + * Term's presence in a document is optional, this is the default value. + */ + OPTIONAL: 1, + + /** + * Term's presence in a document is required, documents that do not contain + * this term will not be returned. + */ + REQUIRED: 2, + + /** + * Term's presence in a document is prohibited, documents that do contain + * this term will not be returned. + */ + PROHIBITED: 3 +} + +/** + * A single clause in a {@link lunr.Query} contains a term and details on how to + * match that term against a {@link lunr.Index}. + * + * @typedef {Object} lunr.Query~Clause + * @property {string[]} fields - The fields in an index this clause should be matched against. + * @property {number} [boost=1] - Any boost that should be applied when matching this clause. + * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be. + * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline. + * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended. + * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents. + */ + +/** + * Adds a {@link lunr.Query~Clause} to this query. + * + * Unless the clause contains the fields to be matched all fields will be matched. In addition + * a default boost of 1 is applied to the clause. + * + * @param {lunr.Query~Clause} clause - The clause to add to this query. 
+ * @see lunr.Query~Clause + * @returns {lunr.Query} + */ +lunr.Query.prototype.clause = function (clause) { + if (!('fields' in clause)) { + clause.fields = this.allFields + } + + if (!('boost' in clause)) { + clause.boost = 1 + } + + if (!('usePipeline' in clause)) { + clause.usePipeline = true + } + + if (!('wildcard' in clause)) { + clause.wildcard = lunr.Query.wildcard.NONE + } + + if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) { + clause.term = "*" + clause.term + } + + if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) { + clause.term = "" + clause.term + "*" + } + + if (!('presence' in clause)) { + clause.presence = lunr.Query.presence.OPTIONAL + } + + this.clauses.push(clause) + + return this +} + +/** + * A negated query is one in which every clause has a presence of + * prohibited. These queries require some special processing to return + * the expected results. + * + * @returns boolean + */ +lunr.Query.prototype.isNegated = function () { + for (var i = 0; i < this.clauses.length; i++) { + if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) { + return false + } + } + + return true +} + +/** + * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause} + * to the list of clauses that make up this query. + * + * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion + * to a token or token-like string should be done before calling this method. + * + * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an + * array, each term in the array will share the same options. + * + * @param {object|object[]} term - The term(s) to add to the query. + * @param {object} [options] - Any additional properties to add to the query clause. 
+ * @returns {lunr.Query} + * @see lunr.Query#clause + * @see lunr.Query~Clause + * @example adding a single term to a query + * query.term("foo") + * @example adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard + * query.term("foo", { + * fields: ["title"], + * boost: 10, + * wildcard: lunr.Query.wildcard.TRAILING + * }) + * @example using lunr.tokenizer to convert a string to tokens before using them as terms + * query.term(lunr.tokenizer("foo bar")) + */ +lunr.Query.prototype.term = function (term, options) { + if (Array.isArray(term)) { + term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this) + return this + } + + var clause = options || {} + clause.term = term.toString() + + this.clause(clause) + + return this +} +lunr.QueryParseError = function (message, start, end) { + this.name = "QueryParseError" + this.message = message + this.start = start + this.end = end +} + +lunr.QueryParseError.prototype = new Error +lunr.QueryLexer = function (str) { + this.lexemes = [] + this.str = str + this.length = str.length + this.pos = 0 + this.start = 0 + this.escapeCharPositions = [] +} + +lunr.QueryLexer.prototype.run = function () { + var state = lunr.QueryLexer.lexText + + while (state) { + state = state(this) + } +} + +lunr.QueryLexer.prototype.sliceString = function () { + var subSlices = [], + sliceStart = this.start, + sliceEnd = this.pos + + for (var i = 0; i < this.escapeCharPositions.length; i++) { + sliceEnd = this.escapeCharPositions[i] + subSlices.push(this.str.slice(sliceStart, sliceEnd)) + sliceStart = sliceEnd + 1 + } + + subSlices.push(this.str.slice(sliceStart, this.pos)) + this.escapeCharPositions.length = 0 + + return subSlices.join('') +} + +lunr.QueryLexer.prototype.emit = function (type) { + this.lexemes.push({ + type: type, + str: this.sliceString(), + start: this.start, + end: this.pos + }) + + this.start = this.pos +} + +lunr.QueryLexer.prototype.escapeCharacter = function () { + this.escapeCharPositions.push(this.pos - 1) + this.pos += 1 +} + +lunr.QueryLexer.prototype.next = function () { + if (this.pos >= this.length) { + return lunr.QueryLexer.EOS + } + + var char = this.str.charAt(this.pos) + this.pos += 1 + return char +} + +lunr.QueryLexer.prototype.width = function () { + return this.pos - this.start +} + +lunr.QueryLexer.prototype.ignore = function () { + if (this.start == this.pos) { + this.pos += 1 + } + + this.start = this.pos +} + +lunr.QueryLexer.prototype.backup = function () { + this.pos -= 1 +} + +lunr.QueryLexer.prototype.acceptDigitRun = function () { + var char, charCode + + do { + char = this.next() + charCode = char.charCodeAt(0) + } while (charCode > 47 && charCode < 58) + + if (char != lunr.QueryLexer.EOS) { + this.backup() + } +} + +lunr.QueryLexer.prototype.more = function () { + return this.pos < this.length +} + +lunr.QueryLexer.EOS = 'EOS' +lunr.QueryLexer.FIELD = 'FIELD' +lunr.QueryLexer.TERM = 'TERM' +lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE' +lunr.QueryLexer.BOOST = 'BOOST' +lunr.QueryLexer.PRESENCE = 'PRESENCE' + +lunr.QueryLexer.lexField = function (lexer) { + lexer.backup() + lexer.emit(lunr.QueryLexer.FIELD) + lexer.ignore() + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexTerm = function (lexer) { + if (lexer.width() > 1) { + lexer.backup() + lexer.emit(lunr.QueryLexer.TERM) + } + + lexer.ignore() + + if (lexer.more()) { + return lunr.QueryLexer.lexText + } +} + +lunr.QueryLexer.lexEditDistance = function (lexer) { + lexer.ignore() + 
lexer.acceptDigitRun() + lexer.emit(lunr.QueryLexer.EDIT_DISTANCE) + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexBoost = function (lexer) { + lexer.ignore() + lexer.acceptDigitRun() + lexer.emit(lunr.QueryLexer.BOOST) + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexEOS = function (lexer) { + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } +} + +// This matches the separator used when tokenising fields +// within a document. These should match otherwise it is +// not possible to search for some tokens within a document. +// +// It is possible for the user to change the separator on the +// tokenizer so it _might_ clash with any other of the special +// characters already used within the search string, e.g. :. +// +// This means that it is possible to change the separator in +// such a way that makes some words unsearchable using a search +// string. +lunr.QueryLexer.termSeparator = lunr.tokenizer.separator + +lunr.QueryLexer.lexText = function (lexer) { + while (true) { + var char = lexer.next() + + if (char == lunr.QueryLexer.EOS) { + return lunr.QueryLexer.lexEOS + } + + // Escape character is '\' + if (char.charCodeAt(0) == 92) { + lexer.escapeCharacter() + continue + } + + if (char == ":") { + return lunr.QueryLexer.lexField + } + + if (char == "~") { + lexer.backup() + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } + return lunr.QueryLexer.lexEditDistance + } + + if (char == "^") { + lexer.backup() + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } + return lunr.QueryLexer.lexBoost + } + + // "+" indicates term presence is required + // checking for length to ensure that only + // leading "+" are considered + if (char == "+" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + // "-" indicates term presence is prohibited + // checking for length to ensure that only + // leading "-" are considered + if (char == "-" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + if (char.match(lunr.QueryLexer.termSeparator)) { + return lunr.QueryLexer.lexTerm + } + } +} + +lunr.QueryParser = function (str, query) { + this.lexer = new lunr.QueryLexer (str) + this.query = query + this.currentClause = {} + this.lexemeIdx = 0 +} + +lunr.QueryParser.prototype.parse = function () { + this.lexer.run() + this.lexemes = this.lexer.lexemes + + var state = lunr.QueryParser.parseClause + + while (state) { + state = state(this) + } + + return this.query +} + +lunr.QueryParser.prototype.peekLexeme = function () { + return this.lexemes[this.lexemeIdx] +} + +lunr.QueryParser.prototype.consumeLexeme = function () { + var lexeme = this.peekLexeme() + this.lexemeIdx += 1 + return lexeme +} + +lunr.QueryParser.prototype.nextClause = function () { + var completedClause = this.currentClause + this.query.clause(completedClause) + this.currentClause = {} +} + +lunr.QueryParser.parseClause = function (parser) { + var lexeme = parser.peekLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.type) { + case lunr.QueryLexer.PRESENCE: + return lunr.QueryParser.parsePresence + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expected either a field or a term, found " + lexeme.type + + if (lexeme.str.length >= 1) { + errorMessage += " with value '" + lexeme.str + "'" + } + + throw new lunr.QueryParseError (errorMessage, 
lexeme.start, lexeme.end) + } +} + +lunr.QueryParser.parsePresence = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.str) { + case "-": + parser.currentClause.presence = lunr.Query.presence.PROHIBITED + break + case "+": + parser.currentClause.presence = lunr.Query.presence.REQUIRED + break + default: + var errorMessage = "unrecognised presence operator'" + lexeme.str + "'" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term or field, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseField = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + if (parser.query.allFields.indexOf(lexeme.str) == -1) { + var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '), + errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields + + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.fields = [lexeme.str] + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseTerm = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + parser.currentClause.term = lexeme.str.toLowerCase() + + if (lexeme.str.indexOf("*") != -1) { + parser.currentClause.usePipeline = false + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseEditDistance = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + var editDistance = parseInt(lexeme.str, 10) + + if (isNaN(editDistance)) { + var errorMessage = "edit distance must be numeric" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.editDistance = editDistance + + var nextLexeme = parser.peekLexeme() + + if 
(nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseBoost = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + var boost = parseInt(lexeme.str, 10) + + if (isNaN(boost)) { + var errorMessage = "boost must be numeric" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.boost = boost + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + + /** + * export the module via AMD, CommonJS or as a browser global + * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js + */ + ;(function (root, factory) { + if (typeof define === 'function' && define.amd) { + // AMD. Register as an anonymous module. + define(factory) + } else if (typeof exports === 'object') { + /** + * Node. Does not work with strict CommonJS, but + * only CommonJS-like environments that support module.exports, + * like Node. + */ + module.exports = factory() + } else { + // Browser globals (root is window) + root.lunr = factory() + } + }(this, function () { + /** + * Just return a value to define the module export. + * This example returns an object, but the module + * can return a function as the exported value. + */ + return lunr + })) +})(); diff --git a/search/main.js b/search/main.js new file mode 100644 index 00000000..a5e469d7 --- /dev/null +++ b/search/main.js @@ -0,0 +1,109 @@ +function getSearchTermFromLocation() { + var sPageURL = window.location.search.substring(1); + var sURLVariables = sPageURL.split('&'); + for (var i = 0; i < sURLVariables.length; i++) { + var sParameterName = sURLVariables[i].split('='); + if (sParameterName[0] == 'q') { + return decodeURIComponent(sParameterName[1].replace(/\+/g, '%20')); + } + } +} + +function joinUrl (base, path) { + if (path.substring(0, 1) === "/") { + // path starts with `/`. Thus it is absolute. 
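+ // Examples (hypothetical inputs, for illustration only): joinUrl("docs/", "/assets/app.js") returns
+ // "/assets/app.js" because absolute paths pass through unchanged; joinUrl("docs/", "search/worker.js")
+ // returns "docs/search/worker.js"; and joinUrl("docs", "worker.js") returns "docs/worker.js".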
+ return path; + } + if (base.substring(base.length-1) === "/") { + // base ends with `/` + return base + path; + } + return base + "/" + path; +} + +function escapeHtml (value) { + return value.replace(/&/g, '&amp;') + .replace(/"/g, '&quot;') + .replace(/</g, '&lt;') + .replace(/>/g, '&gt;'); +} + +function formatResult (location, title, summary) { + return '<article><h3><a href="' + joinUrl(base_url, location) + '">' + title + '</a></h3><p>' + summary + '</p></article>'; +} + +function displayResults (results) { + var search_results = document.getElementById("mkdocs-search-results"); + while (search_results.firstChild) { + search_results.removeChild(search_results.firstChild); + } + if (results.length > 0){ + for (var i=0; i < results.length; i++){ + var result = results[i]; + var html = formatResult(result.location, result.title, result.summary); + search_results.insertAdjacentHTML('beforeend', html); + } + } else { + var noResultsText = search_results.getAttribute('data-no-results-text'); + if (!noResultsText) { + noResultsText = "No results found"; + } + search_results.insertAdjacentHTML('beforeend', '<p>' + noResultsText + '</p>
'); + } +} + +function doSearch () { + var query = document.getElementById('mkdocs-search-query').value; + if (query.length > min_search_length) { + if (!window.Worker) { + displayResults(search(query)); + } else { + searchWorker.postMessage({query: query}); + } + } else { + // Clear results for short queries + displayResults([]); + } +} + +function initSearch () { + var search_input = document.getElementById('mkdocs-search-query'); + if (search_input) { + search_input.addEventListener("keyup", doSearch); + } + var term = getSearchTermFromLocation(); + if (term) { + search_input.value = term; + doSearch(); + } +} + +function onWorkerMessage (e) { + if (e.data.allowSearch) { + initSearch(); + } else if (e.data.results) { + var results = e.data.results; + displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; + } +} + +if (!window.Worker) { + console.log('Web Worker API not supported'); + // load index in main thread + $.getScript(joinUrl(base_url, "search/worker.js")).done(function () { + console.log('Loaded worker'); + init(); + window.postMessage = function (msg) { + onWorkerMessage({data: msg}); + }; + }).fail(function (jqxhr, settings, exception) { + console.error('Could not load worker.js'); + }); +} else { + // Wrap search in a web worker + var searchWorker = new Worker(joinUrl(base_url, "search/worker.js")); + searchWorker.postMessage({init: true}); + searchWorker.onmessage = onWorkerMessage; +} diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..9b954b42 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"UPHENO Ontology Documentation Welcome to the UPHENO documentation! It is entirely empty at the moment so look no further! You can find descriptions of the standard ontology engineering workflows here .","title":"Getting started"},{"location":"#upheno-ontology-documentation","text":"Welcome to the UPHENO documentation! It is entirely empty at the moment so look no further! You can find descriptions of the standard ontology engineering workflows here .","title":"UPHENO Ontology Documentation"},{"location":"about/","text":"About uPheno The uPheno project aims to unify the annotation of phenotypes across species in a manner analogous to unification of gene function annotation by the Gene Ontology. uPheno 2.0 builds on earlier efforts with a strategy that directly leverages the work of the phenotype ontology development community and incorporates phenotypes from a much wider range of species. We have organised a collaborative community effort, including representatives of all major model organism databases, to document and align formal design patterns for representing phenotypes and further develop reference ontologies, such as PATO, which are used in these patterns. A common development infrastructure makes it easy to use these design patterns to generate both species-specific ontologies and a species-independent layer that subsumes them. The resulting community-curated ontology for the representation and integration of phenotypes across species serves two general purposes: - Providing a community-developed framework for ontology editors to bootstrap, maintain and extend their phenotype ontologies in a scalable and standardised manner. 
- Facilitating the retrieval and comparative analysis of species-specific phenotypes through a deep layer of species-independent phenotypes. Currently, the development of uPheno is organized by a group that meets biweekly. See the meetings page for more info, including how to participate.","title":"About uPheno"},{"location":"about/#about-upheno","text":"The uPheno project aims to unify the annotation of phenotypes across species in a manner analogous to unification of gene function annotation by the Gene Ontology. uPheno 2.0 builds on earlier efforts with a strategy that directly leverages the work of the phenotype ontology development community and incorporates phenotypes from a much wider range of species. We have organised a collaborative community effort, including representatives of all major model organism databases, to document and align formal design patterns for representing phenotypes and further develop reference ontologies, such as PATO, which are used in these patterns. A common development infrastructure makes it easy to use these design patterns to generate both species-specific ontologies and a species-independent layer that subsumes them. The resulting community-curated ontology for the representation and integration of phenotypes across species serves two general purposes: - Providing a community-developed framework for ontology editors to bootstrap, maintain and extend their phenotype ontologies in a scalable and standardised manner. - Facilitating the retrieval and comparative analysis of species-specific phenotypes through a deep layer of species-independent phenotypes. Currently, the development of uPheno is organized by a group that meets biweekly. See the meetings page for more info, including how to participate.","title":"About uPheno"},{"location":"cite/","text":"How to cite uPheno Papers uPheno 2 Matentzoglu N, Osumi-Sutherland D, Balhoff JP, Bello S, Bradford Y, Cardmody L, Grove C, Harris MA, Harris N, K\u00f6hler S, McMurry J, Mungall C, Munoz-Torres M, Pilgrim C, Robb S, Robinson PN, Segerdell E, Vasilevsky N, Haendel M. uPheno 2: Framework for standardised representation of phenotypes across species. 2019 Apr 8. http://dx.doi.org/10.7490/f1000research.1116540.1 Original uPheno Sebastian K\u00f6hler, Sandra C Doelken, Barbara J Ruef, Sebastian Bauer, Nicole Washington, Monte Westerfield, George Gkoutos, Paul Schofield, Damian Smedley, Suzanna E Lewis, Peter N Robinson, Christopher J Mungall (2013) Construction and accessibility of a cross-species phenotype ontology along with gene annotations for biomedical research F1000Research Entity-Quality definitions and phenotype modelling C J Mungall, Georgios Gkoutos, Cynthia Smith, Melissa Haendel, Suzanna Lewis, Michael Ashburner (2010) Integrating phenotype ontologies across multiple species Genome Biology 11 (1)","title":"Cite"},{"location":"cite/#how-to-cite-upheno","text":"","title":"How to cite uPheno"},{"location":"cite/#papers","text":"","title":"Papers"},{"location":"cite/#upheno-2","text":"Matentzoglu N, Osumi-Sutherland D, Balhoff JP, Bello S, Bradford Y, Cardmody L, Grove C, Harris MA, Harris N, K\u00f6hler S, McMurry J, Mungall C, Munoz-Torres M, Pilgrim C, Robb S, Robinson PN, Segerdell E, Vasilevsky N, Haendel M. uPheno 2: Framework for standardised representation of phenotypes across species. 2019 Apr 8. 
http://dx.doi.org/10.7490/f1000research.1116540.1","title":"uPheno 2"},{"location":"cite/#original-upheno","text":"Sebastian K\u00f6hler, Sandra C Doelken, Barbara J Ruef, Sebastian Bauer, Nicole Washington, Monte Westerfield, George Gkoutos, Paul Schofield, Damian Smedley, Suzanna E Lewis, Peter N Robinson, Christopher J Mungall (2013) Construction and accessibility of a cross-species phenotype ontology along with gene annotations for biomedical research F1000Research","title":"Original uPheno"},{"location":"cite/#entity-quality-definitions-and-phenotype-modelling","text":"C J Mungall, Georgios Gkoutos, Cynthia Smith, Melissa Haendel, Suzanna Lewis, Michael Ashburner (2010) Integrating phenotype ontologies across multiple species Genome Biology 11 (1)","title":"Entity-Quality definitions and phenotype modelling"},{"location":"contributing/","text":"How to contribute to UPHENO","title":"Contributing"},{"location":"contributing/#how-to-contribute-to-upheno","text":"","title":"How to contribute to UPHENO"},{"location":"howto/add-relation-extension/","text":"How to add the uPheno direct relation extension EQ definitions are powerful tools for reconciling phenotypes across species and driving reasoning. However, they are not all that useful for many \"normal\" users of our ontologies. We have developed a little workflow extension to take care of that. As usual please follow the steps to install the custom uPheno Makefile extension first. Now add a new component to your ont-odk.yaml file (e.g. src/ontology/mp-odk.yaml ): components: products: - filename: eq-relations.owl We can now choose if we want to add the component to your edit file as well. To do that, follow the instructions on adding an import (i.e. adding the component to the edit file and catalog file). The IRI of the component is http://purl.obolibrary.org/obo/YOURONTOLOGY/components/eq-relations.owl . For example, for MP, the IRI is http://purl.obolibrary.org/obo/mp/components/eq-relations.owl . Now we can generate the component: sh run.sh make components/eq-relations.owl This command will be run automatically during a release ( prepare_release ).","title":"Add the uPheno direct relation extension"},{"location":"howto/add-relation-extension/#how-to-add-the-upheno-direct-relation-extension","text":"EQ definitions are powerful tools for reconciling phenotypes across species and driving reasoning. However, they are not all that useful for many \"normal\" users of our ontologies. We have developed a little workflow extension to take care of that. As usual please follow the steps to install the custom uPheno Makefile extension first. Now add a new component to your ont-odk.yaml file (e.g. src/ontology/mp-odk.yaml ): components: products: - filename: eq-relations.owl We can now choose if we want to add the component to your edit file as well. To do that, follow the instructions on adding an import (i.e. adding the component to the edit file and catalog file). The IRI of the component is http://purl.obolibrary.org/obo/YOURONTOLOGY/components/eq-relations.owl . For example, for MP, the IRI is http://purl.obolibrary.org/obo/mp/components/eq-relations.owl . 
Now we can generate the component: sh run.sh make components/eq-relations.owl This command will be run automatically during a release ( prepare_release ).","title":"How to add the uPheno direct relation extension"},{"location":"howto/custom-upheno-makefile/","text":"Add custom uPheno Makefile The custom uPheno Makefile is an extension to your normal custom Makefile (for example, hp.Makefile, mp.Makefile, etc), located in the src/ontology directory of your ODK set up. To install it: (1) Open your normal custom Makefile and add a line in the very end: include pheno.Makefile (2) Now download the custom Makefile: https://raw.githubusercontent.com/obophenotype/upheno/master/src/ontology/config/pheno.Makefile and save it in your src/ontology directory. Feel free to use, for example, wget: cd src/ontology wget https://raw.githubusercontent.com/obophenotype/upheno/master/src/ontology/config/pheno.Makefile -O pheno.Makefile From now on you can simply run sh run.sh make update_pheno_makefile whenever you wish to synchronise the Makefile with the uPheno repo. (Note: it would probably be good to add a GitHub action that does that automatically.)","title":"Add custom uPheno Makefile"},{"location":"howto/custom-upheno-makefile/#add-custom-upheno-makefile","text":"The custom uPheno Makefile is an extension to your normal custom Makefile (for example, hp.Makefile, mp.Makefile, etc), located in the src/ontology directory of your ODK set up. To install it: (1) Open your normal custom Makefile and add a line in the very end: include pheno.Makefile (2) Now download the custom Makefile: https://raw.githubusercontent.com/obophenotype/upheno/master/src/ontology/config/pheno.Makefile and save it in your src/ontology directory. Feel free to use, for example, wget: cd src/ontology wget https://raw.githubusercontent.com/obophenotype/upheno/master/src/ontology/config/pheno.Makefile -O pheno.Makefile From now on you can simply run sh run.sh make update_pheno_makefile whenever you wish to synchronise the Makefile with the uPheno repo. (Note: it would probably be good to add a GitHub action that does that automatically.)","title":"Add custom uPheno Makefile"},{"location":"howto/editors_workflow/","text":"Phenotype Ontology Editors' Workflow Useful links Phenotype Ontology Working Group Meetings agenda and minutes gdoc . phenotype-ontologies slack channel : to send meeting reminders; ask for agenda items; questions; discussions etc. Dead simple owl design pattern (DOS-DP) Documentation Getting started with DOSDP templates . Dead Simple Ontology Design Patterns (DOSDP) . Using DOSDP templates in ODK Workflows . Validate DOS-DP yaml templates: yamllint : yaml syntax validator Installing yamllint : brew install yamllint Congiguring yamllint You can ignore the error line too long yaml syntax errors for dos-dp yaml templates. You can create a custom configuration file for yamllint in your home folder: sh touch ~/.config/yamllint/config The content of the config file should look like this: ```yaml # Custom configuration file for yamllint # It extends the default conf by adjusting some options. extends: default rules: line-length: max: 80 # 80 chars should be enough, but don't fail if a line is longer max: 140 # allow long lines level: warning allow-non-breakable-words: true allow-non-breakable-inline-mappings: true `` The custom config should turn the error line too long errors to warnings. 2. 
[DOS-DP validator:](https://incatools.github.io/dead_simple_owl_design_patterns/validator/): DOS-DP format validator * [Installing ](https://github.com/INCATools/dead_simple_owl_design_patterns): pip install dosdp` Patternisation is the process of ensuring that all entity quality (EQ) descriptions from textual phenotype term definitions have a logical definition pattern. A pattern is a standard format for describing a phenotype that includes a quality and an entity. For example, \"increased body size\" is a pattern that includes the quality \"increased\" and the entity \"body size.\" The goal of patternisation is to make the EQ descriptions more uniform and machine-readable, which facilitates downstream analysis. 1. Identify a group of related phenotypes from diverse organisms The first step in the Phenotype Ontology Editors' Workflow is to identify a group of related phenotypes from diverse organisms. This can be done by considering proposals from phenotype editors or by using the pattern suggestion pipeline. The phenotype editors may propose a group of related phenotypes based on their domain knowledge, while the pattern suggestion pipeline uses semantic similarity and shared Phenotype And Trait Ontology (PATO) quality terms to identify patterns in phenotype terms from different organism-specific ontologies. 2. Propose a phenotype pattern Once a group of related phenotypes is identified, the editors propose a phenotype pattern. To do this, they create a Github issue to request the phenotype pattern template in the uPheno repository. Alternatively, a new template can be proposed at a phenotype editors' meeting which can lead to the creation of a new term request as a Github issue. Ideally, the proposed phenotype pattern should include an appropriate PATO quality term for logical definition, use cases, term examples, and a textual definition pattern for the phenotype terms. 3. Discuss the new phenotype pattern draft at the regular uPheno phenotype editors meeting The next step is to discuss the new phenotype pattern draft at the regular uPheno phenotype editors meeting. During the meeting, the editors' comments and suggestions for improvements are collected as comments on the DOS-DP yaml template in the corresponding Github pull request. Based on the feedback and discussions, a consensus on improvements should be achieved. The DOS-DP yaml template is named should start with a lower case letter, should be informative, and must include the PATO quality term. A Github pull request is created for the DOS-DP yaml template. A DOS-DP phenotype pattern template example: --- pattern_name: ??pattern_and_file_name pattern_iri: http://purl.obolibrary.org/obo/upheno/patterns-dev/??pattern_and_file_name.yaml description: 'A description that helps people chose this pattern for the appropriate scenario.' # examples: # - example_IRI-1 # term name # - example_IRI-2 # term name # - example_IRI-3 # term name # - http://purl.obolibrary.org/obo/XXXXXXXXXX # XXXXXXXX contributors: - https://orcid.org/XXXX-XXXX-XXXX-XXXX # Yyy Yyyyyyyyy classes: process_quality: PATO:0001236 abnormal: PATO:0000460 anatomical_entity: UBERON:0001062 relations: characteristic_of: RO:0000052 has_modifier: RO:0002573 has_part: BFO:0000051 annotationProperties: exact_synonym: oio:hasExactSynonym related_synonym: oio:hasRelatedSynonym xref: oio:hasDbXref vars: var??: \"'anatomical_entity'\" # \"'variable_range'\" name: text: \"trait ?? %s\" vars: - var?? annotations: - annotationProperty: exact_synonym text: \"? of %s\" vars: - var?? 
- annotationProperty: related_synonym text: \"? %s\" vars: - var?? - annotationProperty: xref text: \"AUTO:patterns/patterns/chemical_role_attribute\" def: text: \"A trait that ?? %s.\" vars: - var?? equivalentTo: text: \"'has_part' some ( 'XXXXXXXXXXXXXXXXX' and ('characteristic_of' some %s) and ('has_modifier' some 'abnormal') )\" vars: - var?? ... 4. Review the candidate phenotype pattern Once a consensus on the improvements for a particular template is achieved, they are incorporated into the DOS-DP yaml file. Typically, the improvements are applied to the template some time before a subsequent ontology editor's meeting. There should be enough time for off-line review of the proposed pattern to allow community feedback. The improved phenotype pattern candidate draft should get approval from the community at one of the regular ontology editors' call or in a Github comment. The ontology editors who approve the pattern provide their ORCIDs and they are credited as contributors in an appropriate field of the DOS-DP pattern template. 5. Add the community-approved phenotype pattern template to uPheno Once the community-approved phenotype pattern template is created, it is added to the uPheno Github repository. The approved DOS-DP yaml phenotype pattern template should pass quality control (QC) steps. 1. Validate yaml syntax: yamllint 2. Validate DOS-DP Use DOSDP Validator . * To validate a template using the command line interface, execute: ```sh yamllint dosdp validate -i After successfully passing QC, the responsible editor merges the approved pull request, and the phenotype pattern becomes part of the uPheno phenotype pattern template collection.","title":"Phenotype Ontology Editors' Workflow"},{"location":"howto/editors_workflow/#phenotype-ontology-editors-workflow","text":"","title":"Phenotype Ontology Editors' Workflow"},{"location":"howto/editors_workflow/#useful-links","text":"Phenotype Ontology Working Group Meetings agenda and minutes gdoc . phenotype-ontologies slack channel : to send meeting reminders; ask for agenda items; questions; discussions etc. Dead simple owl design pattern (DOS-DP) Documentation Getting started with DOSDP templates . Dead Simple Ontology Design Patterns (DOSDP) . Using DOSDP templates in ODK Workflows . Validate DOS-DP yaml templates: yamllint : yaml syntax validator Installing yamllint : brew install yamllint Congiguring yamllint You can ignore the error line too long yaml syntax errors for dos-dp yaml templates. You can create a custom configuration file for yamllint in your home folder: sh touch ~/.config/yamllint/config The content of the config file should look like this: ```yaml # Custom configuration file for yamllint # It extends the default conf by adjusting some options. extends: default rules: line-length: max: 80 # 80 chars should be enough, but don't fail if a line is longer","title":"Useful links"},{"location":"howto/editors_workflow/#max-140-allow-long-lines","text":"level: warning allow-non-breakable-words: true allow-non-breakable-inline-mappings: true `` The custom config should turn the error line too long errors to warnings. 2. [DOS-DP validator:](https://incatools.github.io/dead_simple_owl_design_patterns/validator/): DOS-DP format validator * [Installing ](https://github.com/INCATools/dead_simple_owl_design_patterns): pip install dosdp` Patternisation is the process of ensuring that all entity quality (EQ) descriptions from textual phenotype term definitions have a logical definition pattern. 
A pattern is a standard format for describing a phenotype that includes a quality and an entity. For example, \"increased body size\" is a pattern that includes the quality \"increased\" and the entity \"body size.\" The goal of patternisation is to make the EQ descriptions more uniform and machine-readable, which facilitates downstream analysis.","title":"max: 140 # allow long lines"},{"location":"howto/editors_workflow/#1-identify-a-group-of-related-phenotypes-from-diverse-organisms","text":"The first step in the Phenotype Ontology Editors' Workflow is to identify a group of related phenotypes from diverse organisms. This can be done by considering proposals from phenotype editors or by using the pattern suggestion pipeline. The phenotype editors may propose a group of related phenotypes based on their domain knowledge, while the pattern suggestion pipeline uses semantic similarity and shared Phenotype And Trait Ontology (PATO) quality terms to identify patterns in phenotype terms from different organism-specific ontologies.","title":"1. Identify a group of related phenotypes from diverse organisms"},{"location":"howto/editors_workflow/#2-propose-a-phenotype-pattern","text":"Once a group of related phenotypes is identified, the editors propose a phenotype pattern. To do this, they create a Github issue to request the phenotype pattern template in the uPheno repository. Alternatively, a new template can be proposed at a phenotype editors' meeting which can lead to the creation of a new term request as a Github issue. Ideally, the proposed phenotype pattern should include an appropriate PATO quality term for logical definition, use cases, term examples, and a textual definition pattern for the phenotype terms.","title":"2. Propose a phenotype pattern"},{"location":"howto/editors_workflow/#3-discuss-the-new-phenotype-pattern-draft-at-the-regular-upheno-phenotype-editors-meeting","text":"The next step is to discuss the new phenotype pattern draft at the regular uPheno phenotype editors meeting. During the meeting, the editors' comments and suggestions for improvements are collected as comments on the DOS-DP yaml template in the corresponding Github pull request. Based on the feedback and discussions, a consensus on improvements should be achieved. The DOS-DP yaml template is named should start with a lower case letter, should be informative, and must include the PATO quality term. A Github pull request is created for the DOS-DP yaml template. A DOS-DP phenotype pattern template example: --- pattern_name: ??pattern_and_file_name pattern_iri: http://purl.obolibrary.org/obo/upheno/patterns-dev/??pattern_and_file_name.yaml description: 'A description that helps people chose this pattern for the appropriate scenario.' # examples: # - example_IRI-1 # term name # - example_IRI-2 # term name # - example_IRI-3 # term name # - http://purl.obolibrary.org/obo/XXXXXXXXXX # XXXXXXXX contributors: - https://orcid.org/XXXX-XXXX-XXXX-XXXX # Yyy Yyyyyyyyy classes: process_quality: PATO:0001236 abnormal: PATO:0000460 anatomical_entity: UBERON:0001062 relations: characteristic_of: RO:0000052 has_modifier: RO:0002573 has_part: BFO:0000051 annotationProperties: exact_synonym: oio:hasExactSynonym related_synonym: oio:hasRelatedSynonym xref: oio:hasDbXref vars: var??: \"'anatomical_entity'\" # \"'variable_range'\" name: text: \"trait ?? %s\" vars: - var?? annotations: - annotationProperty: exact_synonym text: \"? of %s\" vars: - var?? - annotationProperty: related_synonym text: \"? %s\" vars: - var?? 
- annotationProperty: xref text: \"AUTO:patterns/patterns/chemical_role_attribute\" def: text: \"A trait that ?? %s.\" vars: - var?? equivalentTo: text: \"'has_part' some ( 'XXXXXXXXXXXXXXXXX' and ('characteristic_of' some %s) and ('has_modifier' some 'abnormal') )\" vars: - var?? ...","title":"3. Discuss the new phenotype pattern draft at the regular uPheno phenotype editors meeting"},{"location":"howto/editors_workflow/#4-review-the-candidate-phenotype-pattern","text":"Once a consensus on the improvements for a particular template is achieved, they are incorporated into the DOS-DP yaml file. Typically, the improvements are applied to the template some time before a subsequent ontology editor's meeting. There should be enough time for off-line review of the proposed pattern to allow community feedback. The improved phenotype pattern candidate draft should get approval from the community at one of the regular ontology editors' call or in a Github comment. The ontology editors who approve the pattern provide their ORCIDs and they are credited as contributors in an appropriate field of the DOS-DP pattern template.","title":"4. Review the candidate phenotype pattern"},{"location":"howto/editors_workflow/#5-add-the-community-approved-phenotype-pattern-template-to-upheno","text":"Once the community-approved phenotype pattern template is created, it is added to the uPheno Github repository. The approved DOS-DP yaml phenotype pattern template should pass quality control (QC) steps. 1. Validate yaml syntax: yamllint 2. Validate DOS-DP Use DOSDP Validator . * To validate a template using the command line interface, execute: ```sh yamllint dosdp validate -i After successfully passing QC, the responsible editor merges the approved pull request, and the phenotype pattern becomes part of the uPheno phenotype pattern template collection.","title":"5. Add the community-approved phenotype pattern template to uPheno"},{"location":"howto/pattern-merge-replace-workflow/","text":"Pattern merge - replace workflow This document is on how to merge new DOSDP design patterns into an ODK ontology and then how to replace the old classes with the new ones. 1. You need the tables in tsv format with the DOSDP filler data. Download the tsv tables to $ODK-ONTOLOGY/src/patterns/data/default/ Make sure that the tsv filenames match that of the relevant yaml DOSDP pattern files. 2. Add the new matching pattern yaml filename to $ODK-ONTOLOGY/src/patterns/dosdp-patterns/external.txt 3. Import the new pattern templates that you have just added to the external.txt list from external sources into the current working repository cd ODK-ONTOLOGY/src/ontology sh run.sh make update_patterns 4. make definitions.owl cd ODK-ONTOLOGY/src/ontology sh run.sh make ../patterns/definitions.owl IMP=false 5. Remove old classes and replace them with the equivalent and patternised new classes cd ODK-ONTOLOGY/src/ontology sh run.sh make remove_patternised_classes 6. Announce the pattern migration in an appropriate channel, for example on the phenotype-ontologies Slack channel. For example: I have migrated the ... table and changed the tab colour to blue. 
You can delete the tab if you wish.","title":"Pattern merge - replace workflow"},{"location":"howto/pattern-merge-replace-workflow/#pattern-merge-replace-workflow","text":"This document is on how to merge new DOSDP design patterns into an ODK ontology and then how to replace the old classes with the new ones.","title":"Pattern merge - replace workflow"},{"location":"howto/pattern-merge-replace-workflow/#1-you-need-the-tables-in-tsv-format-with-the-dosdp-filler-data-download-the-tsv-tables-to","text":"$ODK-ONTOLOGY/src/patterns/data/default/ Make sure that the tsv filenames match that of the relevant yaml DOSDP pattern files.","title":"1. You need the tables in tsv format with the DOSDP filler data. Download the tsv tables to"},{"location":"howto/pattern-merge-replace-workflow/#2-add-the-new-matching-pattern-yaml-filename-to","text":"$ODK-ONTOLOGY/src/patterns/dosdp-patterns/external.txt","title":"2. Add the new matching pattern yaml filename to"},{"location":"howto/pattern-merge-replace-workflow/#3-import-the-new-pattern-templates-that-you-have-just-added-to-the-externaltxt-list-from-external-sources-into-the-current-working-repository","text":"cd ODK-ONTOLOGY/src/ontology sh run.sh make update_patterns","title":"3. Import the new pattern templates that you have just added to the external.txt list from external sources into the current working repository"},{"location":"howto/pattern-merge-replace-workflow/#4-make-definitionsowl","text":"cd ODK-ONTOLOGY/src/ontology sh run.sh make ../patterns/definitions.owl IMP=false","title":"4. make definitions.owl"},{"location":"howto/pattern-merge-replace-workflow/#5-remove-old-classes-and-replace-them-with-the-equivalent-and-patternised-new-classes","text":"cd ODK-ONTOLOGY/src/ontology sh run.sh make remove_patternised_classes","title":"5. Remove old classes and replace them with the equivalent and patternised new classes"},{"location":"howto/pattern-merge-replace-workflow/#6-announce-the-pattern-migration-in-an-appropriate-channel-for-example-on-the-phenotype-ontologies-slack-channel","text":"For example: I have migrated the ... table and changed the tab colour to blue. You can delete the tab if you wish.","title":"6. Announce the pattern migration in an appropriate channel, for example on the phenotype-ontologies Slack channel."},{"location":"howto/run-upheno2-release/","text":"How to run a uPheno 2 release In order to run a release you will have to have completed the steps to set up s3 . Clone https://github.com/obophenotype/upheno-dev cd src/scripts sh upheno_pipeline.sh cd ../ontology make prepare_upload S3_VERSION=2022-06-19 make deploy S3_VERSION=2022-06-19","title":"How to run a uPheno 2 release"},{"location":"howto/run-upheno2-release/#how-to-run-a-upheno-2-release","text":"In order to run a release you will have to have completed the steps to set up s3 . Clone https://github.com/obophenotype/upheno-dev cd src/scripts sh upheno_pipeline.sh cd ../ontology make prepare_upload S3_VERSION=2022-06-19 make deploy S3_VERSION=2022-06-19","title":"How to run a uPheno 2 release"},{"location":"howto/set-up-s3/","text":"How to set yourself up for S3 To be able to upload new uPheno release to the uPheno S3 bucket, you need to set yourself up for S3 first. Download and install AWS CLI Obtain secrets from BBOP Add configuration for secrets 1. Download and install AWS CLI The most convenient way to interact with S3 is the AWS Command Line Interface (CLI) . 
You can find the installers and install instructions on that page (different depending on your Operation System): - For Mac - For Windows 2. Obtain secrets from BBOP Next, you need to ask someone at BBOP (such as Chris Mungall or Seth Carbon) to provide you with an account that gives you access to the BBOP s3 buckets. You will have to provide a username. You will receive: - User name - Access key ID- - Secret access key - Console link to sign into bucket 3. Add configuration for secrets You will now have to set up your local system. You will create two files: $ less ~/.aws/config [default] region = us-east-1 and $ less ~/.aws/credentials [default] aws_access_key_id = *** aws_secret_access_key = *** in ~/.aws/credentials make sure you add the correct keys as provided above. 4. Write to your bucket Now, you should be set up to write to your s3 bucket. Note that in order for your data to be accessible through https after your upload, you need to add --acl public read . aws s3 sync --exclude \"*.DS_Store*\" my/data-dir s3://bbop-ontologies/myproject/data-dir --acl public-read If you have previously pushed data to the same location, you wont be able to set it to \"publicly readable\" by simply rerunning the sync command. If you want to publish previously private data, follow the instructions here , e.g.: aws s3api put-object-acl --bucket s3://bbop-ontologies/myproject/data-dir --key exampleobject --acl public-read","title":"How to set up s3 for uploading upheno data files"},{"location":"howto/set-up-s3/#how-to-set-yourself-up-for-s3","text":"To be able to upload new uPheno release to the uPheno S3 bucket, you need to set yourself up for S3 first. Download and install AWS CLI Obtain secrets from BBOP Add configuration for secrets","title":"How to set yourself up for S3"},{"location":"howto/set-up-s3/#1-download-and-install-aws-cli","text":"The most convenient way to interact with S3 is the AWS Command Line Interface (CLI) . You can find the installers and install instructions on that page (different depending on your Operation System): - For Mac - For Windows","title":"1. Download and install AWS CLI"},{"location":"howto/set-up-s3/#2-obtain-secrets-from-bbop","text":"Next, you need to ask someone at BBOP (such as Chris Mungall or Seth Carbon) to provide you with an account that gives you access to the BBOP s3 buckets. You will have to provide a username. You will receive: - User name - Access key ID- - Secret access key - Console link to sign into bucket","title":"2. Obtain secrets from BBOP"},{"location":"howto/set-up-s3/#3-add-configuration-for-secrets","text":"You will now have to set up your local system. You will create two files: $ less ~/.aws/config [default] region = us-east-1 and $ less ~/.aws/credentials [default] aws_access_key_id = *** aws_secret_access_key = *** in ~/.aws/credentials make sure you add the correct keys as provided above.","title":"3. Add configuration for secrets"},{"location":"howto/set-up-s3/#4-write-to-your-bucket","text":"Now, you should be set up to write to your s3 bucket. Note that in order for your data to be accessible through https after your upload, you need to add --acl public read . aws s3 sync --exclude \"*.DS_Store*\" my/data-dir s3://bbop-ontologies/myproject/data-dir --acl public-read If you have previously pushed data to the same location, you wont be able to set it to \"publicly readable\" by simply rerunning the sync command. 
If you want to publish previously private data, follow the instructions here , e.g.: aws s3api put-object-acl --bucket s3://bbop-ontologies/myproject/data-dir --key exampleobject --acl public-read","title":"4. Write to your bucket"},{"location":"odk-workflows/","text":"Default ODK Workflows Daily Editors Workflow Release Workflow Manage your ODK Repository Setting up Docker for ODK Imports management Managing the documentation Managing your Automated Testing","title":"Overview"},{"location":"odk-workflows/#default-odk-workflows","text":"Daily Editors Workflow Release Workflow Manage your ODK Repository Setting up Docker for ODK Imports management Managing the documentation Managing your Automated Testing","title":"Default ODK Workflows"},{"location":"odk-workflows/ContinuousIntegration/","text":"Introduction to Continuous Integration Workflows with ODK Historically, most repos have been using Travis CI for continuous integration testing and building, but due to runtime restrictions, we recently switched a lot of our repos to GitHub actions. You can set up your repo with CI by adding this to your configuration file (src/ontology/upheno-odk.yaml): ci: - github_actions When updateing your repo , you will notice a new file being added: .github/workflows/qc.yml . This file contains your CI logic, so if you need to change, or add anything, this is the place! Alternatively, if your repo is in GitLab instead of GitHub, you can set up your repo with GitLab CI by adding this to your configuration file (src/ontology/upheno-odk.yaml): ci: - gitlab-ci This will add a file called .gitlab-ci.yml in the root of your repo.","title":"Manage Continuous Integration"},{"location":"odk-workflows/ContinuousIntegration/#introduction-to-continuous-integration-workflows-with-odk","text":"Historically, most repos have been using Travis CI for continuous integration testing and building, but due to runtime restrictions, we recently switched a lot of our repos to GitHub actions. You can set up your repo with CI by adding this to your configuration file (src/ontology/upheno-odk.yaml): ci: - github_actions When updateing your repo , you will notice a new file being added: .github/workflows/qc.yml . This file contains your CI logic, so if you need to change, or add anything, this is the place! Alternatively, if your repo is in GitLab instead of GitHub, you can set up your repo with GitLab CI by adding this to your configuration file (src/ontology/upheno-odk.yaml): ci: - gitlab-ci This will add a file called .gitlab-ci.yml in the root of your repo.","title":"Introduction to Continuous Integration Workflows with ODK"},{"location":"odk-workflows/EditorsWorkflow/","text":"Editors Workflow The editors workflow is one of the formal workflows to ensure that the ontology is developed correctly according to ontology engineering principles. There are a few different editors workflows: Local editing workflow: Editing the ontology in your local environment by hand, using tools such as Prot\u00e9g\u00e9, ROBOT templates or DOSDP patterns. Completely automated data pipeline (GitHub Actions) DROID workflow This document only covers the first editing workflow, but more will be added in the future Local editing workflow Workflow requirements: git github docker editing tool of choice, e.g. Prot\u00e9g\u00e9, your favourite text editor, etc 1. Create issue Ensure that there is a ticket on your issue tracker that describes the change you are about to make. 
While this seems optional, this is a very important part of the social contract of building an ontology - no change to the ontology should be performed without a good ticket, describing the motivation and nature of the intended change. 2. Update main branch In your local environment (e.g. your laptop), make sure you are on the main (prev. master ) branch and ensure that you have all the upstream changes, for example: git checkout master git pull 3. Create feature branch Create a new branch. Per convention, we try to use meaningful branch names such as: - issue23removeprocess (where issue 23 is the related issue on GitHub) - issue26addcontributor - release20210101 (for releases) On your command line, this looks like this: git checkout -b issue23removeprocess 4. Perform edit Using your editor of choice, perform the intended edit. For example: Prot\u00e9g\u00e9 Open src/ontology/upheno-edit.owl in Prot\u00e9g\u00e9 Make the change Save the file TextEdit Open src/ontology/upheno-edit.owl in TextEdit (or Sublime, Atom, Vim, Nano) Make the change Save the file Consider the following when making the edit. According to our development philosophy, the only places that should be manually edited are: src/ontology/upheno-edit.owl Any ROBOT templates you chose to use (the TSV files only) Any DOSDP data tables you chose to use (the TSV files, and potentially the associated patterns) components (anything in src/ontology/components ), see here . Imports should not be edited (any edits will be flushed out with the next update). However, refreshing imports is a potentially breaking change - and is discussed elsewhere . Changes should usually be small. Adding or changing 1 term is great. Adding or changing 10 related terms is ok. Adding or changing 100 or more terms at once should be considered very carefully. 4. Check the Git diff This step is very important. Rather than simply trusting your change had the intended effect, we should always use a git diff as a first pass for sanity checking. In our experience, having a visual git client like GitHub Desktop or sourcetree is really helpful for this part. In case you prefer the command line: git status git diff 5. Quality control Now it's time to run your quality control checks. This can either happen locally ( 5a ) or through your continuous integration system ( 7/5b ). 5a. Local testing If you chose to run your test locally: sh run.sh make IMP=false test This will run the whole set of configured ODK tests on including your change. If you have a complex DOSDP pattern pipeline you may want to add PAT=false to skip the potentially lengthy process of rebuilding the patterns. sh run.sh make IMP=false PAT=false test 6. Pull request When you are happy with the changes, you commit your changes to your feature branch, push them upstream (to GitHub) and create a pull request. For example: git add NAMEOFCHANGEDFILES git commit -m \"Added biological process term #12\" git push -u origin issue23removeprocess Then you go to your project on GitHub, and create a new pull request from the branch, for example: https://github.com/INCATools/ontology-development-kit/pulls There is a lot of great advise on how to write pull requests, but at the very least you should: - mention the tickets affected: see #23 to link to a related ticket, or fixes #23 if, by merging this pull request, the ticket is fixed. Tickets in the latter case will be closed automatically by GitHub when the pull request is merged. - summarise the changes in a few sentences. 
Consider the reviewer: what would they want to know right away. - If the diff is large, provide instructions on how to review the pull request best (sometimes, there are many changed files, but only one important change). 7/5b. Continuous Integration Testing If you didn't run and local quality control checks (see 5a ), you should have Continuous Integration (CI) set up, for example: - Travis - GitHub Actions More on how to set this up here . Once the pull request is created, the CI will automatically trigger. If all is fine, it will show up green, otherwise red. 8. Community review Once all the automatic tests have passed, it is important to put a second set of eyes on the pull request. Ontologies are inherently social - as in that they represent some kind of community consensus on how a domain is organised conceptually. This seems high brow talk, but it is very important that as an ontology editor, you have your work validated by the community you are trying to serve (e.g. your colleagues, other contributors etc.). In our experience, it is hard to get more than one review on a pull request - two is great. You can set up GitHub branch protection to actually require a review before a pull request can be merged! We recommend this. This step seems daunting to some hopefully under-resourced ontologies, but we recommend to put this high up on your list of priorities - train a colleague, reach out! 9. Merge and cleanup When the QC is green and the reviews are in (approvals), it is time to merge the pull request. After the pull request is merged, remember to delete the branch as well (this option will show up as a big button right after you have merged the pull request). If you have not done so, close all the associated tickets fixed by the pull request. 10. Changelog (Optional) It is sometimes difficult to keep track of changes made to an ontology. Some ontology teams opt to document changes in a changelog (simply a text file in your repository) so that when release day comes, you know everything you have changed. This is advisable at least for major changes (such as a new release system, a new pattern or template etc.).","title":"Editors Workflow"},{"location":"odk-workflows/EditorsWorkflow/#editors-workflow","text":"The editors workflow is one of the formal workflows to ensure that the ontology is developed correctly according to ontology engineering principles. There are a few different editors workflows: Local editing workflow: Editing the ontology in your local environment by hand, using tools such as Prot\u00e9g\u00e9, ROBOT templates or DOSDP patterns. Completely automated data pipeline (GitHub Actions) DROID workflow This document only covers the first editing workflow, but more will be added in the future","title":"Editors Workflow"},{"location":"odk-workflows/EditorsWorkflow/#local-editing-workflow","text":"Workflow requirements: git github docker editing tool of choice, e.g. Prot\u00e9g\u00e9, your favourite text editor, etc","title":"Local editing workflow"},{"location":"odk-workflows/EditorsWorkflow/#1-create-issue","text":"Ensure that there is a ticket on your issue tracker that describes the change you are about to make. While this seems optional, this is a very important part of the social contract of building an ontology - no change to the ontology should be performed without a good ticket, describing the motivation and nature of the intended change.","title":"1. 
Create issue"},{"location":"odk-workflows/EditorsWorkflow/#2-update-main-branch","text":"In your local environment (e.g. your laptop), make sure you are on the main (prev. master ) branch and ensure that you have all the upstream changes, for example: git checkout master git pull","title":"2. Update main branch"},{"location":"odk-workflows/EditorsWorkflow/#3-create-feature-branch","text":"Create a new branch. Per convention, we try to use meaningful branch names such as: - issue23removeprocess (where issue 23 is the related issue on GitHub) - issue26addcontributor - release20210101 (for releases) On your command line, this looks like this: git checkout -b issue23removeprocess","title":"3. Create feature branch"},{"location":"odk-workflows/EditorsWorkflow/#4-perform-edit","text":"Using your editor of choice, perform the intended edit. For example: Prot\u00e9g\u00e9 Open src/ontology/upheno-edit.owl in Prot\u00e9g\u00e9 Make the change Save the file TextEdit Open src/ontology/upheno-edit.owl in TextEdit (or Sublime, Atom, Vim, Nano) Make the change Save the file Consider the following when making the edit. According to our development philosophy, the only places that should be manually edited are: src/ontology/upheno-edit.owl Any ROBOT templates you chose to use (the TSV files only) Any DOSDP data tables you chose to use (the TSV files, and potentially the associated patterns) components (anything in src/ontology/components ), see here . Imports should not be edited (any edits will be flushed out with the next update). However, refreshing imports is a potentially breaking change - and is discussed elsewhere . Changes should usually be small. Adding or changing 1 term is great. Adding or changing 10 related terms is ok. Adding or changing 100 or more terms at once should be considered very carefully.","title":"4. Perform edit"},{"location":"odk-workflows/EditorsWorkflow/#4-check-the-git-diff","text":"This step is very important. Rather than simply trusting your change had the intended effect, we should always use a git diff as a first pass for sanity checking. In our experience, having a visual git client like GitHub Desktop or sourcetree is really helpful for this part. In case you prefer the command line: git status git diff","title":"4. Check the Git diff"},{"location":"odk-workflows/EditorsWorkflow/#5-quality-control","text":"Now it's time to run your quality control checks. This can either happen locally ( 5a ) or through your continuous integration system ( 7/5b ).","title":"5. Quality control"},{"location":"odk-workflows/EditorsWorkflow/#5a-local-testing","text":"If you chose to run your test locally: sh run.sh make IMP=false test This will run the whole set of configured ODK tests on including your change. If you have a complex DOSDP pattern pipeline you may want to add PAT=false to skip the potentially lengthy process of rebuilding the patterns. sh run.sh make IMP=false PAT=false test","title":"5a. Local testing"},{"location":"odk-workflows/EditorsWorkflow/#6-pull-request","text":"When you are happy with the changes, you commit your changes to your feature branch, push them upstream (to GitHub) and create a pull request. 
For example: git add NAMEOFCHANGEDFILES git commit -m \"Added biological process term #12\" git push -u origin issue23removeprocess Then you go to your project on GitHub, and create a new pull request from the branch, for example: https://github.com/INCATools/ontology-development-kit/pulls There is a lot of great advice on how to write pull requests, but at the very least you should: - mention the tickets affected: see #23 to link to a related ticket, or fixes #23 if, by merging this pull request, the ticket is fixed. Tickets in the latter case will be closed automatically by GitHub when the pull request is merged. - summarise the changes in a few sentences. Consider the reviewer: what would they want to know right away? - If the diff is large, provide instructions on how to review the pull request best (sometimes, there are many changed files, but only one important change).","title":"6. Pull request"},{"location":"odk-workflows/EditorsWorkflow/#75b-continuous-integration-testing","text":"If you did not run any local quality control checks (see 5a ), you should have Continuous Integration (CI) set up, for example: - Travis - GitHub Actions More on how to set this up here . Once the pull request is created, the CI will automatically trigger. If all is fine, it will show up green, otherwise red.","title":"7/5b. Continuous Integration Testing"},{"location":"odk-workflows/EditorsWorkflow/#8-community-review","text":"Once all the automatic tests have passed, it is important to put a second set of eyes on the pull request. Ontologies are inherently social - they represent some kind of community consensus on how a domain is organised conceptually. This may sound like high-brow talk, but it is very important that as an ontology editor, you have your work validated by the community you are trying to serve (e.g. your colleagues, other contributors etc.). In our experience, it is hard to get more than one review on a pull request - two is great. You can set up GitHub branch protection to actually require a review before a pull request can be merged! We recommend this. This step may seem daunting, especially for under-resourced ontologies, but we recommend putting it high up on your list of priorities - train a colleague, reach out!","title":"8. Community review"},{"location":"odk-workflows/EditorsWorkflow/#9-merge-and-cleanup","text":"When the QC is green and the reviews are in (approvals), it is time to merge the pull request. After the pull request is merged, remember to delete the branch as well (this option will show up as a big button right after you have merged the pull request). If you have not done so, close all the associated tickets fixed by the pull request.","title":"9. Merge and cleanup"},{"location":"odk-workflows/EditorsWorkflow/#10-changelog-optional","text":"It is sometimes difficult to keep track of changes made to an ontology. Some ontology teams opt to document changes in a changelog (simply a text file in your repository) so that when release day comes, you know everything you have changed. This is advisable at least for major changes (such as a new release system, a new pattern or template etc.).","title":"10. Changelog (Optional)"},{"location":"odk-workflows/ManageAutomatedTest/","text":"Constraint violation checks We can define custom checks using SPARQL . SPARQL queries define bad modelling patterns (missing labels, misspelt URIs, and many more) in the ontology. If these queries return any results, then the build will fail. 
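As an illustration, a minimal violation query could look like the sketch below, which reports classes that lack an rdfs:label. The file name and exact filters are hypothetical and only meant to show the general shape of such a check; the checks shipped with the ODK may differ.

```sparql
# src/sparql/missing-label-violation.sparql (hypothetical example)
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>

SELECT ?entity WHERE {
  ?entity a owl:Class .
  FILTER NOT EXISTS { ?entity rdfs:label ?label }
  FILTER (!isBlank(?entity))
}
```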
Custom checks are designed to be run as part of GitHub Actions Continuous Integration testing, but they can also run locally. Steps to add a constraint violation check: Add the SPARQL query in src/sparql . The name of the file should end with -violation.sparql . Please give it a name that makes clear which violation the query checks for. Add the name of the new file to the ODK configuration file src/ontology/upheno-odk.yaml : Include the name of the file (without the -violation.sparql part) in the list under the custom_sparql_checks key, which sits inside the robot_report key. If the robot_report or custom_sparql_checks keys are not available, please add this code block to the end of the file. yaml robot_report: release_reports: False fail_on: ERROR use_labels: False custom_profile: True report_on: - edit custom_sparql_checks: - name-of-the-file-check 3. Update the repository so your new SPARQL check will be included in the QC. sh run.sh make update_repo","title":"Manage automated tests"},{"location":"odk-workflows/ManageAutomatedTest/#constraint-violation-checks","text":"We can define custom checks using SPARQL . SPARQL queries define bad modelling patterns (missing labels, misspelt URIs, and many more) in the ontology. If these queries return any results, then the build will fail. Custom checks are designed to be run as part of GitHub Actions Continuous Integration testing, but they can also run locally.","title":"Constraint violation checks"},{"location":"odk-workflows/ManageAutomatedTest/#steps-to-add-a-constraint-violation-check","text":"Add the SPARQL query in src/sparql . The name of the file should end with -violation.sparql . Please give it a name that makes clear which violation the query checks for. Add the name of the new file to the ODK configuration file src/ontology/upheno-odk.yaml : Include the name of the file (without the -violation.sparql part) in the list under the custom_sparql_checks key, which sits inside the robot_report key. If the robot_report or custom_sparql_checks keys are not available, please add this code block to the end of the file. yaml robot_report: release_reports: False fail_on: ERROR use_labels: False custom_profile: True report_on: - edit custom_sparql_checks: - name-of-the-file-check 3. Update the repository so your new SPARQL check will be included in the QC. sh run.sh make update_repo","title":"Steps to add a constraint violation check:"},{"location":"odk-workflows/ManageDocumentation/","text":"Updating the Documentation The documentation for UPHENO is managed in two places (relative to the repository root): The docs directory contains all the files that pertain to the content of the documentation (more below) the mkdocs.yaml file contains the documentation config, in particular its navigation bar and theme. The documentation is hosted using GitHub pages, on a special branch of the repository (called gh-pages ). It is important that this branch is never deleted - it contains all the files GitHub pages needs to render and deploy the site. It is also important to note that the gh-pages branch should never be edited manually . All changes to the docs happen inside the docs directory on the main branch. Editing the docs Changing content All the documentation is contained in the docs directory, and is managed in Markdown . Markdown is a very simple and convenient way to produce text documents with formatting instructions, and is very easy to learn - it is also used, for example, in GitHub issues. 
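For orientation, a navigation entry in mkdocs.yaml looks roughly like the fragment below. This is an illustrative sketch rather than the project's actual configuration; the site name, theme and page paths shown here are assumptions, and the real file contains more settings.

```yaml
# mkdocs.yaml (illustrative fragment only)
site_name: uPheno documentation
nav:
  - Home: index.md
  - About: about.md
  - ODK workflows:
      - Editors workflow: odk-workflows/EditorsWorkflow.md
```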
This is a normal editing workflow: Open the .md file you want to change in an editor of choice (a simple text editor is often best). IMPORTANT : Do not edit any files in the docs/odk-workflows/ directory. These files are managed by the ODK system and will be overwritten when the repository is upgraded! If you wish to change these files, make an issue on the ODK issue tracker . Perform the edit and save the file Commit the file to a branch, and create a pull request as usual. If your development team likes your changes, merge the docs into master branch. Deploy the documentation (see below) Deploy the documentation The documentation is not automatically updated from the Markdown, and needs to be deployed deliberately. To do this, perform the following steps: In your terminal, navigate to the edit directory of your ontology, e.g.: cd upheno/src/ontology Now you are ready to build the docs as follows: sh run.sh make update_docs Mkdocs now sets off to build the site from the markdown pages. You will be asked to Enter your username Enter your password (see here for using GitHub access tokens instead) IMPORTANT : Using password based authentication will be deprecated this year (2021). Make sure you read up on personal access tokens if that happens! If everything was successful, you will see a message similar to this one: INFO - Your documentation should shortly be available at: https://obophenotype.github.io/upheno/ 3. Just to double check, you can now navigate to your documentation pages (usually https://obophenotype.github.io/upheno/). Just make sure you give GitHub 2-5 minutes to build the pages!","title":"Manage documentation"},{"location":"odk-workflows/ManageDocumentation/#updating-the-documentation","text":"The documentation for UPHENO is managed in two places (relative to the repository root): The docs directory contains all the files that pertain to the content of the documentation (more below) the mkdocs.yaml file contains the documentation config, in particular its navigation bar and theme. The documentation is hosted using GitHub pages, on a special branch of the repository (called gh-pages ). It is important that this branch is never deleted - it contains all the files GitHub pages needs to render and deploy the site. It is also important to note that the gh-pages branch should never be edited manually . All changes to the docs happen inside the docs directory on the main branch.","title":"Updating the Documentation"},{"location":"odk-workflows/ManageDocumentation/#editing-the-docs","text":"","title":"Editing the docs"},{"location":"odk-workflows/ManageDocumentation/#changing-content","text":"All the documentation is contained in the docs directory, and is managed in Markdown . Markdown is a very simple and convenient way to produce text documents with formatting instructions, and is very easy to learn - it is also used, for example, in GitHub issues. This is a normal editing workflow: Open the .md file you want to change in an editor of choice (a simple text editor is often best). IMPORTANT : Do not edit any files in the docs/odk-workflows/ directory. These files are managed by the ODK system and will be overwritten when the repository is upgraded! If you wish to change these files, make an issue on the ODK issue tracker . Perform the edit and save the file Commit the file to a branch, and create a pull request as usual. If your development team likes your changes, merge the docs into master branch. 
Deploy the documentation (see below)","title":"Changing content"},{"location":"odk-workflows/ManageDocumentation/#deploy-the-documentation","text":"The documentation is not automatically updated from the Markdown, and needs to be deployed deliberately. To do this, perform the following steps: In your terminal, navigate to the edit directory of your ontology, e.g.: cd upheno/src/ontology Now you are ready to build the docs as follows: sh run.sh make update_docs Mkdocs now sets off to build the site from the markdown pages. You will be asked to Enter your username Enter your password (see here for using GitHub access tokens instead) IMPORTANT : Using password based authentication will be deprecated this year (2021). Make sure you read up on personal access tokens if that happens! If everything was successful, you will see a message similar to this one: INFO - Your documentation should shortly be available at: https://obophenotype.github.io/upheno/ 3. Just to double check, you can now navigate to your documentation pages (usually https://obophenotype.github.io/upheno/). Just make sure you give GitHub 2-5 minutes to build the pages!","title":"Deploy the documentation"},{"location":"odk-workflows/ReleaseWorkflow/","text":"The release workflow The release workflow recommended by the ODK is based on GitHub releases and works as follows: Run a release with the ODK Review the release Merge to main branch Create a GitHub release These steps are outlined in detail in the following. Run a release with the ODK Preparation: Ensure that all your pull requests are merged into your main (master) branch Make sure that all changes to master are committed to GitHub ( git status should say that there are no modified files) Locally make sure you have the latest changes from master ( git pull ) Checkout a new branch (e.g. git checkout -b release-2021-01-01 ) You may or may not want to refresh your imports as part of your release strategy (see here ) Make sure you have the latest ODK installed by running docker pull obolibrary/odkfull To actually run the release, you: Open a command line terminal window and navigate to the src/ontology directory ( cd upheno/src/ontology ) Run release pipeline: sh run.sh make prepare_release -B . Note that for some ontologies, this process can take up to 90 minutes - especially if there are large ontologies you depend on, like PRO or CHEBI. If everything went well, you should see the following output on your machine: Release files are now in ../.. - now you should commit, push and make a release on your git hosting site such as GitHub or GitLab . This will create all the specified release targets (OBO, OWL, JSON, and the variants, ont-full and ont-base) and copy them into your release directory (the top level of your repo). Review the release (Optional) Rough check. This step is frequently skipped, but for the more paranoid among us (like the author of this doc), this is a 3 minute additional effort for some peace of mind. Open the main release (upheno.owl) in you favourite development environment (i.e. Prot\u00e9g\u00e9) and eyeball the hierarchy. We recommend two simple checks: Does the very top level of the hierarchy look ok? This means that all new terms have been imported/updated correctly. Does at least one change that you know should be in this release appear? For example, a new class. This means that the release was actually based on the recent edit file. 
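For orientation, the preparation and release-run steps described above condense to roughly the following shell session. This is a sketch only: the branch name and date are placeholders, and the runtime depends heavily on the size of the imported ontologies.

```sh
# start from an up-to-date main branch, then branch for the release
git pull
git checkout -b release-2021-01-01

# make sure the latest ODK image is available
docker pull obolibrary/odkfull

# run the release pipeline from the ontology directory
cd upheno/src/ontology
sh run.sh make prepare_release -B
```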
Commit your changes to the branch and make a pull request In your GitHub pull request, review the following three files in detail (based on our experience): upheno.obo - this reflects a useful subset of the whole ontology (everything that can be covered by OBO format). OBO format has one big thing going for it: it is very easy to review! upheno-base.owl - this reflects the asserted axioms in your ontology that you have actually edited. Ideally also take a look at upheno-full.owl , which may reveal interesting new inferences you did not know about. Note that the diff of this file is sometimes quite large. Like with every pull request, we recommend always employing a second set of eyes when reviewing a PR! Merge the main branch Once your CI checks have passed, and your reviews are completed, you can now merge the branch into your main branch (don't forget to delete the branch afterwards - a big button will appear after the merge is finished). Create a GitHub release Go to your releases page on GitHub by navigating to your repository, and then clicking on releases (usually on the right, for example: https://github.com/obophenotype/upheno/releases). Then click \"Draft new release\". As the tag version you need to choose the date on which your ontologies were built. You can find this, for example, by looking at the upheno.obo file and checking the data-version: property. The date needs to be prefixed with a v , so, for example v2020-02-06 . You can write whatever you want in the release title, but we typically write the date again. The description underneath should contain a concise list of changes or term additions. Click \"Publish release\". Done. Debugging typical ontology release problems Problems with memory When you are dealing with large ontologies, you need a lot of memory. When you see error messages relating to large ontologies such as CHEBI, PRO, NCBITAXON, or Uberon, you should think of memory first, see here . Problems when using OBO format based tools Sometimes you will get cryptic error messages when using legacy tools using OBO format, such as the ontology release tool (OORT), which is also available as part of the ODK docker container. In these cases, you need to track down what axiom or annotation actually caused the breakdown. In our experience (in about 60% of the cases) the problem lies with duplicate annotations ( def , comment ) which are illegal in OBO. Here is an example recipe of how to deal with such a problem: If you get a message like make: *** [cl.Makefile:84: oort] Error 255 you might have an OORT error. To debug this, in your terminal enter sh run.sh make IMP=false PAT=false oort -B (assuming you are already in the ontology folder in your directory) This should show you where the error is in the log (e.g. multiple different definitions) WARNING: THE FIX BELOW IS NOT IDEAL, YOU SHOULD ALWAYS TRY TO FIX UPSTREAM IF POSSIBLE Open upheno-edit.owl in Prot\u00e9g\u00e9, find the offending term and delete all offending annotations (e.g. delete ALL definitions, if the problem was \"multiple def tags not allowed\") and save. While this is not ideal, as it will remove all definitions from that term, they will be added back again when the term is fixed in the ontology it was imported from and re-imported. 
Rerun sh run.sh make IMP=false PAT=false oort -B and if it all passes, commit your changes to a branch and make a pull request as usual.","title":"Release Workflow"},{"location":"odk-workflows/ReleaseWorkflow/#the-release-workflow","text":"The release workflow recommended by the ODK is based on GitHub releases and works as follows: Run a release with the ODK Review the release Merge to main branch Create a GitHub release These steps are outlined in detail in the following.","title":"The release workflow"},{"location":"odk-workflows/ReleaseWorkflow/#run-a-release-with-the-odk","text":"Preparation: Ensure that all your pull requests are merged into your main (master) branch Make sure that all changes to master are committed to GitHub ( git status should say that there are no modified files) Locally make sure you have the latest changes from master ( git pull ) Checkout a new branch (e.g. git checkout -b release-2021-01-01 ) You may or may not want to refresh your imports as part of your release strategy (see here ) Make sure you have the latest ODK installed by running docker pull obolibrary/odkfull To actually run the release, you: Open a command line terminal window and navigate to the src/ontology directory ( cd upheno/src/ontology ) Run release pipeline: sh run.sh make prepare_release -B . Note that for some ontologies, this process can take up to 90 minutes - especially if there are large ontologies you depend on, like PRO or CHEBI. If everything went well, you should see the following output on your machine: Release files are now in ../.. - now you should commit, push and make a release on your git hosting site such as GitHub or GitLab . This will create all the specified release targets (OBO, OWL, JSON, and the variants, ont-full and ont-base) and copy them into your release directory (the top level of your repo).","title":"Run a release with the ODK"},{"location":"odk-workflows/ReleaseWorkflow/#review-the-release","text":"(Optional) Rough check. This step is frequently skipped, but for the more paranoid among us (like the author of this doc), this is a 3 minute additional effort for some peace of mind. Open the main release (upheno.owl) in you favourite development environment (i.e. Prot\u00e9g\u00e9) and eyeball the hierarchy. We recommend two simple checks: Does the very top level of the hierarchy look ok? This means that all new terms have been imported/updated correctly. Does at least one change that you know should be in this release appear? For example, a new class. This means that the release was actually based on the recent edit file. Commit your changes to the branch and make a pull request In your GitHub pull request, review the following three files in detail (based on our experience): upheno.obo - this reflects a useful subset of the whole ontology (everything that can be covered by OBO format). OBO format has that speaking for it: it is very easy to review! upheno-base.owl - this reflects the asserted axioms in your ontology that you have actually edited. Ideally also take a look at upheno-full.owl , which may reveal interesting new inferences you did not know about. Note that the diff of this file is sometimes quite large. 
Like with every pull request, we recommend to always employ a second set of eyes when reviewing a PR!","title":"Review the release"},{"location":"odk-workflows/ReleaseWorkflow/#merge-the-main-branch","text":"Once your CI checks have passed, and your reviews are completed, you can now merge the branch into your main branch (don't forget to delete the branch afterwards - a big button will appear after the merge is finished).","title":"Merge the main branch"},{"location":"odk-workflows/ReleaseWorkflow/#create-a-github-release","text":"Go to your releases page on GitHub by navigating to your repository, and then clicking on releases (usually on the right, for example: https://github.com/obophenotype/upheno/releases). Then click \"Draft new release\" As the tag version you need to choose the date on which your ontologies were build. You can find this, for example, by looking at the upheno.obo file and check the data-version: property. The date needs to be prefixed with a v , so, for example v2020-02-06 . You can write whatever you want in the release title, but we typically write the date again. The description underneath should contain a concise list of changes or term additions. Click \"Publish release\". Done.","title":"Create a GitHub release"},{"location":"odk-workflows/ReleaseWorkflow/#debugging-typical-ontology-release-problems","text":"","title":"Debugging typical ontology release problems"},{"location":"odk-workflows/ReleaseWorkflow/#problems-with-memory","text":"When you are dealing with large ontologies, you need a lot of memory. When you see error messages relating to large ontologies such as CHEBI, PRO, NCBITAXON, or Uberon, you should think of memory first, see here .","title":"Problems with memory"},{"location":"odk-workflows/ReleaseWorkflow/#problems-when-using-obo-format-based-tools","text":"Sometimes you will get cryptic error messages when using legacy tools using OBO format, such as the ontology release tool (OORT), which is also available as part of the ODK docker container. In these cases, you need to track down what axiom or annotation actually caused the breakdown. In our experience (in about 60% of the cases) the problem lies with duplicate annotations ( def , comment ) which are illegal in OBO. Here is an example recipe of how to deal with such a problem: If you get a message like make: *** [cl.Makefile:84: oort] Error 255 you might have a OORT error. To debug this, in your terminal enter sh run.sh make IMP=false PAT=false oort -B (assuming you are already in the ontology folder in your directory) This should show you where the error is in the log (eg multiple different definitions) WARNING: THE FIX BELOW IS NOT IDEAL, YOU SHOULD ALWAYS TRY TO FIX UPSTREAM IF POSSIBLE Open upheno-edit.owl in Prot\u00e9g\u00e9 and find the offending term and delete all offending issue (e.g. delete ALL definition, if the problem was \"multiple def tags not allowed\") and save. *While this is not idea, as it will remove all definitions from that term, it will be added back again when the term is fixed in the ontology it was imported from and added back in. Rerun sh run.sh make IMP=false PAT=false oort -B and if it all passes, commit your changes to a branch and make a pull request as usual.","title":"Problems when using OBO format based tools"},{"location":"odk-workflows/RepoManagement/","text":"Managing your ODK repository Updating your ODK repository Your ODK repositories configuration is managed in src/ontology/upheno-odk.yaml . 
Once you have made your changes, you can run the following to apply your changes to the repository: sh run.sh make update_repo There are a large number of options that can be set to configure your ODK, but we will only discuss a few of them here. NOTE for Windows users: You may get a cryptic failure such as Set Illegal Option - if the update script located in src/scripts/update_repo.sh was saved using Windows Line endings. These need to change to unix line endings. In Notepad++, for example, you can click on Edit->EOL Conversion->Unix LF to change this. Managing imports You can use the update repository workflow described on this page to perform the following operations to your imports: Add a new import Modify an existing import Remove an import you no longer want Customise an import We will discuss all these workflows in the following. Add new import To add a new import, you first edit your odk config as described above , adding an id to the product list in the import_group section (for the sake of this example, we assume you already import RO, and your goal is to also import GO): import_group: products: - id: ro - id: go Note: our ODK file should only have one import_group which can contain multiple imports (in the products section). Next, you run the update repo workflow to apply these changes. Note that by default, this module is going to be a SLME Bottom module, see here . To change that or customise your module, see section \"Customise an import\". To finalise the addition of your import, perform the following steps: Add an import statement to your src/ontology/upheno-edit.owl file. We suggest to do this using a text editor, by simply copying an existing import declaration and renaming it to the new ontology import, for example as follows: ... Ontology( Import() Import() ... Add your imports redirect to your catalog file src/ontology/catalog-v001.xml , for example: Test whether everything is in order: Refresh your import Open in your Ontology Editor of choice (Protege) and ensure that the expected terms are imported. Note: The catalog file src/ontology/catalog-v001.xml has one purpose: redirecting imports from URLs to local files. For example, if you have Import() in your editors file (the ontology) and in your catalog, tools like robot or Prot\u00e9g\u00e9 will recognize the statement in the catalog file to redirect the URL http://purl.obolibrary.org/obo/upheno/imports/go_import.owl to the local file imports/go_import.owl (which is in your src/ontology directory). Modify an existing import If you simply wish to refresh your import in light of new terms, see here . If you wish to change the type of your module see section \"Customise an import\". Remove an existing import To remove an existing import, perform the following steps: remove the import declaration from your src/ontology/upheno-edit.owl . remove the id from your src/ontology/upheno-odk.yaml , eg. - id: go from the list of products in the import_group . run update repo workflow delete the associated files manually: src/imports/go_import.owl src/imports/go_terms.txt Remove the respective entry from the src/ontology/catalog-v001.xml file. Customise an import By default, an import module extracted from a source ontology will be a SLME module, see here . There are various options to change the default. 
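Note that the inline examples for the import declaration and the catalog redirect above appear to have lost their markup in this rendering. For reference, the import declaration in the editors file is an OWL functional-syntax line such as Import(&lt;http://purl.obolibrary.org/obo/upheno/imports/go_import.owl&gt;), and a catalog redirect entry typically looks roughly like the sketch below; the exact attributes and surrounding group elements in your catalog-v001.xml may differ, so treat this as an assumption rather than the canonical form.

```xml
<!-- src/ontology/catalog-v001.xml: redirect the import IRI to the local module file -->
<uri name="http://purl.obolibrary.org/obo/upheno/imports/go_import.owl"
     uri="imports/go_import.owl"/>
```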
The following change to your repo config ( src/ontology/upheno-odk.yaml ) will switch the go import from an SLME module to a simple ROBOT filter module: import_group: products: - id: ro - id: go module_type: filter A ROBOT filter module is, essentially, importing all external terms declared by your ontology (see here on how to declare external terms to be imported). Note that the filter module does not consider terms/annotations from namespaces other than the base-namespace of the ontology itself. For example, in the example of GO above, only annotations / axioms related to the GO base IRI (http://purl.obolibrary.org/obo/GO_) would be considered. This behaviour can be changed by adding additional base IRIs as follows: import_group: products: - id: go module_type: filter base_iris: - http://purl.obolibrary.org/obo/GO_ - http://purl.obolibrary.org/obo/CL_ - http://purl.obolibrary.org/obo/BFO If you wish to customise your import entirely, you can specify your own ROBOT command to do so. To do that, add the following to your repo config ( src/ontology/upheno-odk.yaml ): import_group: products: - id: ro - id: go module_type: custom Now add a new goal in your custom Makefile ( src/ontology/upheno.Makefile , not src/ontology/Makefile ). imports/go_import.owl: mirror/ro.owl imports/ro_terms_combined.txt if [ $(IMP) = true ]; then $(ROBOT) query -i $< --update ../sparql/preprocess-module.ru \\ extract -T imports/ro_terms_combined.txt --force true --individuals exclude --method BOT \\ query --update ../sparql/inject-subset-declaration.ru --update ../sparql/postprocess-module.ru \\ annotate --ontology-iri $(ONTBASE)/$@ $(ANNOTATE_ONTOLOGY_VERSION) --output $@.tmp.owl && mv $@.tmp.owl $@; fi Now feel free to change this goal to do whatever you wish it to do! It probably makes some sense (albeit not being a strict necessity), to leave most of the goal instead and replace only: extract -T imports/ro_terms_combined.txt --force true --individuals exclude --method BOT \\ to another ROBOT pipeline. Add a component A component is an import which belongs to your ontology, e.g. is managed by you and your team. Open src/ontology/upheno-odk.yaml If you dont have it yet, add a new top level section components Under the components section, add a new section called products . This is where all your components are specified Under the products section, add a new component, e.g. - filename: mycomp.owl Example components: products: - filename: mycomp.owl When running sh run.sh make update_repo , a new file src/ontology/components/mycomp.owl will be created which you can edit as you see fit. Typical ways to edit: Using a ROBOT template to generate the component (see below) Manually curating the component separately with Prot\u00e9g\u00e9 or any other editor Providing a components/mycomp.owl: make target in src/ontology/upheno.Makefile and provide a custom command to generate the component WARNING : Note that the custom rule to generate the component MUST NOT depend on any other ODK-generated file such as seed files and the like (see issue ). Providing an additional attribute for the component in src/ontology/upheno-odk.yaml , source , to specify that this component should simply be downloaded from somewhere on the web. Adding a new component based on a ROBOT template Since ODK 1.3.2, it is possible to simply link a ROBOT template to a component without having to specify any of the import logic. 
In order to add a new component that is connected to one or more template files, follow these steps: Open src/ontology/upheno-odk.yaml . Make sure that use_templates: TRUE is set in the global project options. You should also make sure that use_context: TRUE is set in case you are using prefixes in your templates that are not known to robot , such as OMOP: , CPONT: and more. All non-standard prefixes you are using should be added to config/context.json . Add another component to the products section. To activate this component to be template-driven, simply say: use_template: TRUE . This will create an empty template for you in the templates directory, which will automatically be processed when recreating the component (e.g. run.bat make recreate-mycomp ). If you want to use more than one component, use the templates field to add as many template names as you wish. ODK will look for them in the src/templates directory. Advanced: If you want to provide additional processing options, you can use the template_options field. This should be a string with option from robot template . One typical example for additional options you may want to provide is --add-prefixes config/context.json to ensure the prefix map of your context is provided to robot , see above. Example : components: products: - filename: mycomp.owl use_template: TRUE template_options: --add-prefixes config/context.json templates: - template1.tsv - template2.tsv Note : if your mirror is particularly large and complex, read this ODK recommendation .","title":"Manage your ODK Repository"},{"location":"odk-workflows/RepoManagement/#managing-your-odk-repository","text":"","title":"Managing your ODK repository"},{"location":"odk-workflows/RepoManagement/#updating-your-odk-repository","text":"Your ODK repositories configuration is managed in src/ontology/upheno-odk.yaml . Once you have made your changes, you can run the following to apply your changes to the repository: sh run.sh make update_repo There are a large number of options that can be set to configure your ODK, but we will only discuss a few of them here. NOTE for Windows users: You may get a cryptic failure such as Set Illegal Option - if the update script located in src/scripts/update_repo.sh was saved using Windows Line endings. These need to change to unix line endings. In Notepad++, for example, you can click on Edit->EOL Conversion->Unix LF to change this.","title":"Updating your ODK repository"},{"location":"odk-workflows/RepoManagement/#managing-imports","text":"You can use the update repository workflow described on this page to perform the following operations to your imports: Add a new import Modify an existing import Remove an import you no longer want Customise an import We will discuss all these workflows in the following.","title":"Managing imports"},{"location":"odk-workflows/RepoManagement/#add-new-import","text":"To add a new import, you first edit your odk config as described above , adding an id to the product list in the import_group section (for the sake of this example, we assume you already import RO, and your goal is to also import GO): import_group: products: - id: ro - id: go Note: our ODK file should only have one import_group which can contain multiple imports (in the products section). Next, you run the update repo workflow to apply these changes. Note that by default, this module is going to be a SLME Bottom module, see here . To change that or customise your module, see section \"Customise an import\". 
To finalise the addition of your import, perform the following steps: Add an import statement to your src/ontology/upheno-edit.owl file. We suggest to do this using a text editor, by simply copying an existing import declaration and renaming it to the new ontology import, for example as follows: ... Ontology( Import() Import() ... Add your imports redirect to your catalog file src/ontology/catalog-v001.xml , for example: Test whether everything is in order: Refresh your import Open in your Ontology Editor of choice (Protege) and ensure that the expected terms are imported. Note: The catalog file src/ontology/catalog-v001.xml has one purpose: redirecting imports from URLs to local files. For example, if you have Import() in your editors file (the ontology) and in your catalog, tools like robot or Prot\u00e9g\u00e9 will recognize the statement in the catalog file to redirect the URL http://purl.obolibrary.org/obo/upheno/imports/go_import.owl to the local file imports/go_import.owl (which is in your src/ontology directory).","title":"Add new import"},{"location":"odk-workflows/RepoManagement/#modify-an-existing-import","text":"If you simply wish to refresh your import in light of new terms, see here . If you wish to change the type of your module see section \"Customise an import\".","title":"Modify an existing import"},{"location":"odk-workflows/RepoManagement/#remove-an-existing-import","text":"To remove an existing import, perform the following steps: remove the import declaration from your src/ontology/upheno-edit.owl . remove the id from your src/ontology/upheno-odk.yaml , eg. - id: go from the list of products in the import_group . run update repo workflow delete the associated files manually: src/imports/go_import.owl src/imports/go_terms.txt Remove the respective entry from the src/ontology/catalog-v001.xml file.","title":"Remove an existing import"},{"location":"odk-workflows/RepoManagement/#customise-an-import","text":"By default, an import module extracted from a source ontology will be a SLME module, see here . There are various options to change the default. The following change to your repo config ( src/ontology/upheno-odk.yaml ) will switch the go import from an SLME module to a simple ROBOT filter module: import_group: products: - id: ro - id: go module_type: filter A ROBOT filter module is, essentially, importing all external terms declared by your ontology (see here on how to declare external terms to be imported). Note that the filter module does not consider terms/annotations from namespaces other than the base-namespace of the ontology itself. For example, in the example of GO above, only annotations / axioms related to the GO base IRI (http://purl.obolibrary.org/obo/GO_) would be considered. This behaviour can be changed by adding additional base IRIs as follows: import_group: products: - id: go module_type: filter base_iris: - http://purl.obolibrary.org/obo/GO_ - http://purl.obolibrary.org/obo/CL_ - http://purl.obolibrary.org/obo/BFO If you wish to customise your import entirely, you can specify your own ROBOT command to do so. To do that, add the following to your repo config ( src/ontology/upheno-odk.yaml ): import_group: products: - id: ro - id: go module_type: custom Now add a new goal in your custom Makefile ( src/ontology/upheno.Makefile , not src/ontology/Makefile ). 
imports/go_import.owl: mirror/ro.owl imports/ro_terms_combined.txt if [ $(IMP) = true ]; then $(ROBOT) query -i $< --update ../sparql/preprocess-module.ru \\ extract -T imports/ro_terms_combined.txt --force true --individuals exclude --method BOT \\ query --update ../sparql/inject-subset-declaration.ru --update ../sparql/postprocess-module.ru \\ annotate --ontology-iri $(ONTBASE)/$@ $(ANNOTATE_ONTOLOGY_VERSION) --output $@.tmp.owl && mv $@.tmp.owl $@; fi Now feel free to change this goal to do whatever you wish it to do! It probably makes some sense (albeit not being a strict necessity), to leave most of the goal instead and replace only: extract -T imports/ro_terms_combined.txt --force true --individuals exclude --method BOT \\ to another ROBOT pipeline.","title":"Customise an import"},{"location":"odk-workflows/RepoManagement/#add-a-component","text":"A component is an import which belongs to your ontology, e.g. is managed by you and your team. Open src/ontology/upheno-odk.yaml If you dont have it yet, add a new top level section components Under the components section, add a new section called products . This is where all your components are specified Under the products section, add a new component, e.g. - filename: mycomp.owl Example components: products: - filename: mycomp.owl When running sh run.sh make update_repo , a new file src/ontology/components/mycomp.owl will be created which you can edit as you see fit. Typical ways to edit: Using a ROBOT template to generate the component (see below) Manually curating the component separately with Prot\u00e9g\u00e9 or any other editor Providing a components/mycomp.owl: make target in src/ontology/upheno.Makefile and provide a custom command to generate the component WARNING : Note that the custom rule to generate the component MUST NOT depend on any other ODK-generated file such as seed files and the like (see issue ). Providing an additional attribute for the component in src/ontology/upheno-odk.yaml , source , to specify that this component should simply be downloaded from somewhere on the web.","title":"Add a component"},{"location":"odk-workflows/RepoManagement/#adding-a-new-component-based-on-a-robot-template","text":"Since ODK 1.3.2, it is possible to simply link a ROBOT template to a component without having to specify any of the import logic. In order to add a new component that is connected to one or more template files, follow these steps: Open src/ontology/upheno-odk.yaml . Make sure that use_templates: TRUE is set in the global project options. You should also make sure that use_context: TRUE is set in case you are using prefixes in your templates that are not known to robot , such as OMOP: , CPONT: and more. All non-standard prefixes you are using should be added to config/context.json . Add another component to the products section. To activate this component to be template-driven, simply say: use_template: TRUE . This will create an empty template for you in the templates directory, which will automatically be processed when recreating the component (e.g. run.bat make recreate-mycomp ). If you want to use more than one component, use the templates field to add as many template names as you wish. ODK will look for them in the src/templates directory. Advanced: If you want to provide additional processing options, you can use the template_options field. This should be a string with option from robot template . 
One typical example for additional options you may want to provide is --add-prefixes config/context.json to ensure the prefix map of your context is provided to robot , see above. Example : components: products: - filename: mycomp.owl use_template: TRUE template_options: --add-prefixes config/context.json templates: - template1.tsv - template2.tsv Note : if your mirror is particularly large and complex, read this ODK recommendation .","title":"Adding a new component based on a ROBOT template"},{"location":"odk-workflows/RepositoryFileStructure/","text":"Repository structure The main kinds of files in the repository: Release files Imports Components Release files Release file are the file that are considered part of the official ontology release and to be used by the community. A detailed description of the release artefacts can be found here . Imports Imports are subsets of external ontologies that contain terms and axioms you would like to re-use in your ontology. These are considered \"external\", like dependencies in software development, and are not included in your \"base\" product, which is the release artefact which contains only those axioms that you personally maintain. These are the current imports in UPHENO Import URL Type go https://raw.githubusercontent.com/obophenotype/pro_obo_slim/master/pr_slim.owl None nbo http://purl.obolibrary.org/obo/nbo.owl None uberon http://purl.obolibrary.org/obo/uberon.owl None cl http://purl.obolibrary.org/obo/cl.owl None pato http://purl.obolibrary.org/obo/pato.owl None mpath http://purl.obolibrary.org/obo/mpath.owl None ro http://purl.obolibrary.org/obo/ro.owl None omo http://purl.obolibrary.org/obo/omo.owl None chebi https://raw.githubusercontent.com/obophenotype/chebi_obo_slim/main/chebi_slim.owl None oba http://purl.obolibrary.org/obo/oba.owl None ncbitaxon http://purl.obolibrary.org/obo/ncbitaxon/subsets/taxslim.owl None pr https://raw.githubusercontent.com/obophenotype/pro_obo_slim/master/pr_slim.owl None bspo http://purl.obolibrary.org/obo/bspo.owl None ncit http://purl.obolibrary.org/obo/ncit.owl None fbbt http://purl.obolibrary.org/obo/fbbt.owl None fbdv http://purl.obolibrary.org/obo/fbdv.owl None hsapdv http://purl.obolibrary.org/obo/hsapdv.owl None wbls http://purl.obolibrary.org/obo/wbls.owl None wbbt http://purl.obolibrary.org/obo/wbbt.owl None plana http://purl.obolibrary.org/obo/plana.owl None zfa http://purl.obolibrary.org/obo/zfa.owl None xao http://purl.obolibrary.org/obo/xao.owl None hsapdv-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-hsapdv.owl custom zfa-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-zfa.owl custom zfs-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-zfs.owl custom xao-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-xao.owl custom wbbt-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-wbbt.owl custom wbls-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-wbls.owl custom fbbt-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-fbbt.owl custom xao-cl http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-xao.owl custom wbbt-cl http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-wbbt.owl custom fbbt-cl http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-fbbt.owl custom Components Components, in contrast to imports, are considered full members of the ontology. 
This means that any axiom in a component is also included in the ontology base - which means it is considered native to the ontology. While this sounds complicated, consider this: conceptually, no component should be part of more than one ontology. If that seems to be the case, we are most likely talking about an import. Components are often not needed for ontologies, but there are some use cases: There is an automated process that generates and re-generates a part of the ontology A part of the ontology is managed in ROBOT templates The expressivity of the component is higher than the format of the edit file. For example, people still choose to manage their ontology in OBO format (they should not) missing out on a lot of owl features. They may choose to manage logic that is beyond OBO in a specific OWL component. These are the components in UPHENO Filename URL phenotypes_manual.owl None upheno-mappings.owl None cross-species-mappings.owl None","title":"Your ODK Repository Overview"},{"location":"odk-workflows/RepositoryFileStructure/#repository-structure","text":"The main kinds of files in the repository: Release files Imports Components","title":"Repository structure"},{"location":"odk-workflows/RepositoryFileStructure/#release-files","text":"Release file are the file that are considered part of the official ontology release and to be used by the community. A detailed description of the release artefacts can be found here .","title":"Release files"},{"location":"odk-workflows/RepositoryFileStructure/#imports","text":"Imports are subsets of external ontologies that contain terms and axioms you would like to re-use in your ontology. These are considered \"external\", like dependencies in software development, and are not included in your \"base\" product, which is the release artefact which contains only those axioms that you personally maintain. 
These are the current imports in UPHENO Import URL Type go https://raw.githubusercontent.com/obophenotype/pro_obo_slim/master/pr_slim.owl None nbo http://purl.obolibrary.org/obo/nbo.owl None uberon http://purl.obolibrary.org/obo/uberon.owl None cl http://purl.obolibrary.org/obo/cl.owl None pato http://purl.obolibrary.org/obo/pato.owl None mpath http://purl.obolibrary.org/obo/mpath.owl None ro http://purl.obolibrary.org/obo/ro.owl None omo http://purl.obolibrary.org/obo/omo.owl None chebi https://raw.githubusercontent.com/obophenotype/chebi_obo_slim/main/chebi_slim.owl None oba http://purl.obolibrary.org/obo/oba.owl None ncbitaxon http://purl.obolibrary.org/obo/ncbitaxon/subsets/taxslim.owl None pr https://raw.githubusercontent.com/obophenotype/pro_obo_slim/master/pr_slim.owl None bspo http://purl.obolibrary.org/obo/bspo.owl None ncit http://purl.obolibrary.org/obo/ncit.owl None fbbt http://purl.obolibrary.org/obo/fbbt.owl None fbdv http://purl.obolibrary.org/obo/fbdv.owl None hsapdv http://purl.obolibrary.org/obo/hsapdv.owl None wbls http://purl.obolibrary.org/obo/wbls.owl None wbbt http://purl.obolibrary.org/obo/wbbt.owl None plana http://purl.obolibrary.org/obo/plana.owl None zfa http://purl.obolibrary.org/obo/zfa.owl None xao http://purl.obolibrary.org/obo/xao.owl None hsapdv-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-hsapdv.owl custom zfa-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-zfa.owl custom zfs-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-zfs.owl custom xao-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-xao.owl custom wbbt-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-wbbt.owl custom wbls-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-wbls.owl custom fbbt-uberon http://purl.obolibrary.org/obo/uberon/bridge/uberon-bridge-to-fbbt.owl custom xao-cl http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-xao.owl custom wbbt-cl http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-wbbt.owl custom fbbt-cl http://purl.obolibrary.org/obo/uberon/bridge/cl-bridge-to-fbbt.owl custom","title":"Imports"},{"location":"odk-workflows/RepositoryFileStructure/#components","text":"Components, in contrast to imports, are considered full members of the ontology. This means that any axiom in a component is also included in the ontology base - which means it is considered native to the ontology. While this sounds complicated, consider this: conceptually, no component should be part of more than one ontology. If that seems to be the case, we are most likely talking about an import. Components are often not needed for ontologies, but there are some use cases: There is an automated process that generates and re-generates a part of the ontology A part of the ontology is managed in ROBOT templates The expressivity of the component is higher than the format of the edit file. For example, people still choose to manage their ontology in OBO format (they should not) missing out on a lot of owl features. They may choose to manage logic that is beyond OBO in a specific OWL component. These are the components in UPHENO Filename URL phenotypes_manual.owl None upheno-mappings.owl None cross-species-mappings.owl None","title":"Components"},{"location":"odk-workflows/SettingUpDockerForODK/","text":"Setting up your Docker environment for ODK use One of the most frequent problems with running the ODK for the first time is failure because of lack of memory. 
This can look like a Java OutOfMemory exception, but more often than not it will appear as something like an Error 137 . There are two places you need to consider to set your memory: Your src/ontology/run.sh (or run.bat) file. You can set the memory in there by adding robot_java_args: '-Xmx8G' to your src/ontology/upheno-odk.yaml file, see for example here . Set your docker memory. By default, it should be about 10-20% more than your robot_java_args variable. You can manage your memory settings by right-clicking on the docker whale in your system bar-->Preferences-->Resources-->Advanced, see picture below.","title":"Setting up Docker for ODK"},{"location":"odk-workflows/SettingUpDockerForODK/#setting-up-your-docker-environment-for-odk-use","text":"One of the most frequent problems with running the ODK for the first time is failure because of lack of memory. This can look like a Java OutOfMemory exception, but more often than not it will appear as something like an Error 137 . There are two places you need to consider to set your memory: Your src/ontology/run.sh (or run.bat) file. You can set the memory in there by adding robot_java_args: '-Xmx8G' to your src/ontology/upheno-odk.yaml file, see for example here . Set your docker memory. By default, it should be about 10-20% more than your robot_java_args variable. You can manage your memory settings by right-clicking on the docker whale in your system bar-->Preferences-->Resources-->Advanced, see picture below.","title":"Setting up your Docker environment for ODK use"},{"location":"odk-workflows/UpdateImports/","text":"Update Imports Workflow This page discusses how to update the contents of your imports, like adding or removing terms. If you are looking to customise imports, like changing the module type, see here . Importing a new term Note: some ontologies now use a merged-import system to manage dynamic imports, for these please follow instructions in the section title \"Using the Base Module approach\". Importing a new term is split into two sub-phases: Declaring the terms to be imported Refreshing imports dynamically Declaring terms to be imported There are three ways to declare terms that are to be imported from an external ontology. Choose the appropriate one for your particular scenario (all three can be used in parallel if need be): Prot\u00e9g\u00e9-based declaration Using term files Using the custom import template Prot\u00e9g\u00e9-based declaration This workflow is to be avoided, but may be appropriate if the editor does not have access to the ODK docker container . This approach also applies to ontologies that use base module import approach. Open your ontology (edit file) in Prot\u00e9g\u00e9 (5.5+). Select 'owl:Thing' Add a new class as usual. Paste the full iri in the 'Name:' field, for example, http://purl.obolibrary.org/obo/CHEBI_50906. Click 'OK' Now you can use this term for example to construct logical definitions. The next time the imports are refreshed (see how to refresh here ), the metadata (labels, definitions, etc.) for this term are imported from the respective external source ontology and becomes visible in your ontology. Using term files Every import has, by default a term file associated with it, which can be found in the imports directory. For example, if you have a GO import in src/ontology/go_import.owl , you will also have an associated term file src/ontology/go_terms.txt . 
You can add terms in there simply as a list: GO:0008150 GO:0008151 Now you can run the refresh imports workflow and the two terms will be imported. Using the custom import template This workflow is appropriate if: You prefer to manage all your imported terms in a single file (rather than multiple files like in the \"Using term files\" workflow above). You wish to augment your imported ontologies with additional information. This requires a cautionary discussion. To enable this workflow, you add the following to your ODK config file ( src/ontology/upheno-odk.yaml ), and update the repository : use_custom_import_module: TRUE Now you can manage your imported terms directly in the custom external terms template, which is located at src/templates/external_import.owl . Note that this file is a ROBOT template , and can, in principle, be extended to include any axioms you like. Before extending the template, however, read the following carefully. The main purpose of the custom import template is to enable the management of all terms to be imported in a centralised place. To enable that, you do not have to do anything other than maintaining the template. So if you, say, currently import APOLLO_SV:00000480 , and you wish to import APOLLO_SV:00000532 , you simply add a row like this: ID Entity Type ID TYPE APOLLO_SV:00000480 owl:Class APOLLO_SV:00000532 owl:Class When the imports are refreshed (see imports refresh workflow ), the term(s) will simply be imported from the configured ontologies. Now, if you wish to extend the Makefile (which is beyond these instructions) and add, say, synonyms to the imported terms, you can do that, but you need to (a) preserve the ID and ENTITY columns and (b) ensure that the ROBOT template remains valid, see here . WARNING . Note that doing this is a widespread antipattern (see related issue ). You should not change the axioms of terms that do not belong into your ontology unless necessary - such changes should always be pushed into the ontology where they belong. However, since people are doing it, whether the OBO Foundry likes it or not, at least using the custom imports module as described here localises the changes to a single simple template and ensures that none of the annotations added this way are merged into the base file . Refresh imports If you want to refresh the import yourself (this may be necessary to pass the Travis tests), and you have the ODK installed, you can do the following (using go as an example): First, you navigate in your terminal to the ontology directory (underneath src in your upheno root directory). cd src/ontology Then, you regenerate the import that will now include any new terms you have added. Note: You must have Docker installed . sh run.sh make PAT=false imports/go_import.owl -B Since ODK 1.2.27, it is also possible to simply run the following, which is the same as the above: sh run.sh make refresh-go Note that in case you changed the defaults, you need to add IMP=true and/or MIR=true to the command below: sh run.sh make IMP=true MIR=true PAT=false imports/go_import.owl -B If you wish to skip refreshing the mirror, i.e. skip downloading the latest version of the source ontology for your import (e.g. go.owl for your go import), you can set MIR=false instead, which will do the exact same thing as the above, but is easier to remember: sh run.sh make IMP=true MIR=false PAT=false imports/go_import.owl -B Using the Base Module approach Since ODK 1.2.31, we support an entirely new approach to generating modules: using base files. 
The idea is to only import those axioms from an ontology that actually belong to it. A base file is a subset of the ontology that only contains those axioms that nominally belong there. In other words, the base file does not contain any axioms that belong to another ontology. An example would be this: Imagine this being the full Uberon ontology: Axiom 1: BFO:123 SubClassOf BFO:124 Axiom 2: UBERON:123 SubClassOf BFO:123 Axiom 3: UBERON:124 SubClassOf UBERON:123 The base file is the set of all axioms that are about UBERON terms: Axiom 2: UBERON:123 SubClassOf BFO:123 Axiom 3: UBERON:124 SubClassOf UBERON:123 I.e. Axiom 1: BFO:123 SubClassOf BFO:124 gets removed. The base file pipeline is a bit more complex than the normal pipelines, because of the logical interactions between the imported ontologies. This is solved by first merging all mirrors into one huge file and then extracting one mega module from it. Example: Let's say we are importing terms from Uberon, GO and RO in our ontologies. When we use the base pipelines, we 1) First obtain the base (usually by simply downloading it, but there is also an option now to create it with ROBOT) 2) We merge all base files into one big pile 3) Then we extract a single module imports/merged_import.owl The first implementation of this pipeline is PATO, see https://github.com/pato-ontology/pato/blob/master/src/ontology/pato-odk.yaml. To check if your ontology uses this method, check src/ontology/upheno-odk.yaml to see if use_base_merging: TRUE is declared under import_group . If your ontology uses the Base Module approach, please use the following steps: First, add the term to be imported to the term file associated with it (see above \"Using term files\" section if this is not clear to you) Next, you navigate in your terminal to the ontology directory (underneath src in your upheno root directory). cd src/ontology Then refresh imports by running sh run.sh make imports/merged_import.owl Note: if your mirrors are updated, you can run sh run.sh make no-mirror-refresh-merged This requires quite a bit of memory on your local machine, so if you encounter an error, it might be a lack of memory on your computer. A solution would be to create a ticket in an issue tracker requesting the term to be imported, and one of the local devs should pick this up and run the import for you. Lastly, restart Prot\u00e9g\u00e9, and the term should be imported and ready to be used.","title":"Manage imports"},{"location":"odk-workflows/UpdateImports/#update-imports-workflow","text":"This page discusses how to update the contents of your imports, like adding or removing terms. If you are looking to customise imports, like changing the module type, see here .","title":"Update Imports Workflow"},{"location":"odk-workflows/UpdateImports/#importing-a-new-term","text":"Note: some ontologies now use a merged-import system to manage dynamic imports; for these, please follow the instructions in the section titled \"Using the Base Module approach\". Importing a new term is split into two sub-phases: Declaring the terms to be imported Refreshing imports dynamically","title":"Importing a new term"},{"location":"odk-workflows/UpdateImports/#declaring-terms-to-be-imported","text":"There are three ways to declare terms that are to be imported from an external ontology.
Choose the appropriate one for your particular scenario (all three can be used in parallel if need be): Prot\u00e9g\u00e9-based declaration Using term files Using the custom import template","title":"Declaring terms to be imported"},{"location":"odk-workflows/UpdateImports/#protege-based-declaration","text":"This workflow is to be avoided, but may be appropriate if the editor does not have access to the ODK docker container . This approach also applies to ontologies that use base module import approach. Open your ontology (edit file) in Prot\u00e9g\u00e9 (5.5+). Select 'owl:Thing' Add a new class as usual. Paste the full iri in the 'Name:' field, for example, http://purl.obolibrary.org/obo/CHEBI_50906. Click 'OK' Now you can use this term for example to construct logical definitions. The next time the imports are refreshed (see how to refresh here ), the metadata (labels, definitions, etc.) for this term are imported from the respective external source ontology and becomes visible in your ontology.","title":"Prot\u00e9g\u00e9-based declaration"},{"location":"odk-workflows/UpdateImports/#using-term-files","text":"Every import has, by default a term file associated with it, which can be found in the imports directory. For example, if you have a GO import in src/ontology/go_import.owl , you will also have an associated term file src/ontology/go_terms.txt . You can add terms in there simply as a list: GO:0008150 GO:0008151 Now you can run the refresh imports workflow ) and the two terms will be imported.","title":"Using term files"},{"location":"odk-workflows/UpdateImports/#using-the-custom-import-template","text":"This workflow is appropriate if: You prefer to manage all your imported terms in a single file (rather than multiple files like in the \"Using term files\" workflow above). You wish to augment your imported ontologies with additional information. This requires a cautionary discussion. To enable this workflow, you add the following to your ODK config file ( src/ontology/upheno-odk.yaml ), and update the repository : use_custom_import_module: TRUE Now you can manage your imported terms directly in the custom external terms template, which is located at src/templates/external_import.owl . Note that this file is a ROBOT template , and can, in principle, be extended to include any axioms you like. Before extending the template, however, read the following carefully. The main purpose of the custom import template is to enable the management off all terms to be imported in a centralised place. To enable that, you do not have to do anything other than maintaining the template. So if you, say currently import APOLLO_SV:00000480 , and you wish to import APOLLO_SV:00000532 , you simply add a row like this: ID Entity Type ID TYPE APOLLO_SV:00000480 owl:Class APOLLO_SV:00000532 owl:Class When the imports are refreshed see imports refresh workflow , the term(s) will simply be imported from the configured ontologies. Now, if you wish to extend the Makefile (which is beyond these instructions) and add, say, synonyms to the imported terms, you can do that, but you need to (a) preserve the ID and ENTITY columns and (b) ensure that the ROBOT template is valid otherwise, see here . WARNING . Note that doing this is a widespread antipattern (see related issue ). You should not change the axioms of terms that do not belong into your ontology unless necessary - such changes should always be pushed into the ontology where they belong. 
However, since people are doing it, whether the OBO Foundry likes it or not, at least using the custom imports module as described here localises the changes to a single simple template and ensures that none of the annotations added this way are merged into the base file .","title":"Using the custom import template"},{"location":"odk-workflows/UpdateImports/#refresh-imports","text":"If you want to refresh the import yourself (this may be necessary to pass the travis tests), and you have the ODK installed, you can do the following (using go as an example): First, you navigate in your terminal to the ontology directory (underneath src in your upheno root directory). cd src/ontology Then, you regenerate the import that will now include any new terms you have added. Note: You must have docker installed . sh run.sh make PAT=false imports/go_import.owl -B Since ODK 1.2.27, it is also possible to simply run the following, which is the same as the above: sh run.sh make refresh-go Note that in case you changed the defaults, you need to add IMP=true and/or MIR=true to the command below: sh run.sh make IMP=true MIR=true PAT=false imports/go_import.owl -B If you wish to skip refreshing the mirror, i.e. skip downloading the latest version of the source ontology for your import (e.g. go.owl for your go import), you can set MIR=false instead, which will do the exact same thing as the above, but is easier to remember: sh run.sh make IMP=true MIR=false PAT=false imports/go_import.owl -B","title":"Refresh imports"},{"location":"odk-workflows/UpdateImports/#using-the-base-module-approach","text":"Since ODK 1.2.31, we support an entirely new approach to generate modules: Using base files. The idea is to only import those axioms from an ontology that actually belong to it. A base file is a subset of the ontology that only contains those axioms that nominally belong there. In other words, the base file does not contain any axioms that belong to another ontology. An example would be this: Imagine this being the full Uberon ontology: Axiom 1: BFO:123 SubClassOf BFO:124 Axiom 2: UBERON:123 SubClassOf BFO:123 Axiom 3: UBERON:124 SubClassOf UBERON:123 The base file is the set of all axioms that are about UBERON terms: Axiom 2: UBERON:123 SubClassOf BFO:123 Axiom 3: UBERON:124 SubClassOf UBERON:123 I.e. Axiom 1: BFO:123 SubClassOf BFO:124 gets removed. The base file pipeline is a bit more complex than the normal pipelines, because of the logical interactions between the imported ontologies. This is solved by first merging all mirrors into one huge file and then extracting one mega module from it. Example: Let's say we are importing terms from Uberon, GO and RO in our ontologies. When we use the base pipelines, we 1) First obtain the base (usually by simply downloading it, but there is also an option now to create it with ROBOT) 2) We merge all base files into one big pile 3) Then we extract a single module imports/merged_import.owl The first implementation of this pipeline is PATO, see https://github.com/pato-ontology/pato/blob/master/src/ontology/pato-odk.yaml. To check if your ontology uses this method, check src/ontology/upheno-odk.yaml to see if use_base_merging: TRUE is declared under import_group . If your ontology uses the Base Module approach, please use the following steps: First, add the term to be imported to the term file associated with it (see above \"Using term files\" section if this is not clear to you) Next, you navigate in your terminal to the ontology directory (underneath src in your upheno root directory).
cd src/ontology Then refresh imports by running sh run.sh make imports/merged_import.owl Note: if your mirrors are updated, you can run sh run.sh make no-mirror-refresh-merged This requires quite a bit of memory on your local machine, so if you encounter an error, it might be a lack of memory on your computer. A solution would be to create a ticket in an issue tracker requesting for the term to be imported, and one of the local devs should pick this up and run the import for you. Lastly, restart Prot\u00e9g\u00e9, and the term should be imported in ready to be used.","title":"Using the Base Module approach"},{"location":"odk-workflows/components/","text":"Adding components to an ODK repo For details on what components are, please see component section of repository file structure document . To add custom components to an ODK repo, please follow the following steps: 1) Locate your odk yaml file and open it with your favourite text editor (src/ontology/upheno-odk.yaml) 2) Search if there is already a component section to the yaml file, if not add it accordingly, adding the name of your component: components: products: - filename: your-component-name.owl 3) Add the component to your catalog file (src/ontology/catalog-v001.xml) 4) Add the component to the edit file (src/ontology/upheno-edit.obo) for .obo formats: import: http://purl.obolibrary.org/obo/upheno/components/your-component-name.owl for .owl formats: Import() 5) Refresh your repo by running sh run.sh make update_repo - this should create a new file in src/ontology/components. 6) In your custom makefile (src/ontology/upheno.Makefile) add a goal for your custom make file. In this example, the goal is a ROBOT template. $(COMPONENTSDIR)/your-component-name.owl: $(SRC) ../templates/your-component-template.tsv $(ROBOT) template --template ../templates/your-component-template.tsv \\ annotate --ontology-iri $(ONTBASE)/$@ --output $(COMPONENTSDIR)/your-component-name.owl (If using a ROBOT template, do not forget to add your template tsv in src/templates/) 7) Make the file by running sh run.sh make components/your-component-name.owl","title":"Overview of components"},{"location":"odk-workflows/components/#adding-components-to-an-odk-repo","text":"For details on what components are, please see component section of repository file structure document . To add custom components to an ODK repo, please follow the following steps: 1) Locate your odk yaml file and open it with your favourite text editor (src/ontology/upheno-odk.yaml) 2) Search if there is already a component section to the yaml file, if not add it accordingly, adding the name of your component: components: products: - filename: your-component-name.owl 3) Add the component to your catalog file (src/ontology/catalog-v001.xml) 4) Add the component to the edit file (src/ontology/upheno-edit.obo) for .obo formats: import: http://purl.obolibrary.org/obo/upheno/components/your-component-name.owl for .owl formats: Import() 5) Refresh your repo by running sh run.sh make update_repo - this should create a new file in src/ontology/components. 6) In your custom makefile (src/ontology/upheno.Makefile) add a goal for your custom make file. In this example, the goal is a ROBOT template. 
$(COMPONENTSDIR)/your-component-name.owl: $(SRC) ../templates/your-component-template.tsv $(ROBOT) template --template ../templates/your-component-template.tsv \\ annotate --ontology-iri $(ONTBASE)/$@ --output $(COMPONENTSDIR)/your-component-name.owl (If using a ROBOT template, do not forget to add your template tsv in src/templates/) 7) Make the file by running sh run.sh make components/your-component-name.owl","title":"Adding components to an ODK repo"},{"location":"organization/meetings/","text":"The Unified Phenotype Ontology (uPheno) meeting series The uPheno editors call is held every second Thursday (bi-weekly) on Zoom, provided by members of the Monarch Initiative and co-organised by members of the Alliance and Genome Resources. If you wish to join the meeting, you can open an issue on https://github.com/obophenotype/upheno/issues with the request to be added, or send an email to phenotype-ontologies-editors@googlegroups.com. The meeting coordinator (MC) is the person charged with organising the meeting. The current MC is Ray, @rays22. Meeting preparation The MC prepares the agenda in advance: everyone on the call is very busy and our time is precious. Every agenda item has an associated ticket on GitHub, and a clear set of action items should be added in GitHub Tasklist syntax to the first comment on the issue. If there are issues for any subtasks (e.g. PATO or Uberon edits), the list should be edited to link these. Any items that do not have a subissue but do involve changes to patterns) should be edited to link to implementing PR. It does not matter who wrote the first issue comment, the uPheno team can simply add a tasklist underneath the original comment and refine it over time. Tag all issues which need discussion with \"upheno call\" It must be clear from the task list what the uPheno team should be doing during the call (discuss, decide, review). For example, one item on the task list may read: \"uPheno team to decide on appropriate label for template\". Conversely, no issue should be added to the agenda that does not have a clear set of action items associated with it that should be addressed during the call . These actions may include making and documenting modelling decisions. Go through up to 10 issues on the uPheno issue tracker before each meeting to determine how to progress on them, and add action items. Only if they need to be discussed, add the \"upheno call\" label. Meeting Every meeting should start with a quick (max 5 min, ideally 3 min) overview of all the goals and how they processed. The MC should mention all blockers and goals, even the ones we did not make any progress on, to keep the focus on the priorities: uPheno releases uPheno documentation Pattern creation Patternisation: The process of ensuring that phenotype ontologies are using uPheno conformant templates to define their phenotypes. Harmonisation: The process of ensuring that phenotype patterns are applied consistently across ontologies. For new pattern discussions: Every new pattern proposal should come with a new GitHub issue, appropriately tagged. The issue text should detail the use cases for the pattern well, and these use cases should also be documented in the \"description\" part of the DOSDP YAML file. Uses cases should include expected classifications and why we want them (and potentially classifications to avoid). e.g. axis-specific dimension traits should classify under more abstractly defined dimension traits which in term should classify under Morphology. 
Add some examples of contexts where grouping along these classifications is useful. Agenda items may include discussion and decisions about more general modelling issues that affect more than one pattern, but these should also be documented as tickets as described above. After the meeting After every meeting, update all issues discussed on GitHub and, in particular, clarify the remaining action items. Ensure that the highest priority issues are discussed first.","title":"Meetings"},{"location":"organization/meetings/#the-unified-phenotype-ontology-upheno-meeting-series","text":"The uPheno editors call is held every second Thursday (bi-weekly) on Zoom, provided by members of the Monarch Initiative and co-organised by members of the Alliance and Genome Resources. If you wish to join the meeting, you can open an issue on https://github.com/obophenotype/upheno/issues with the request to be added, or send an email to phenotype-ontologies-editors@googlegroups.com. The meeting coordinator (MC) is the person charged with organising the meeting. The current MC is Ray, @rays22.","title":"The Unified Phenotype Ontology (uPheno) meeting series"},{"location":"organization/meetings/#meeting-preparation","text":"The MC prepares the agenda in advance: everyone on the call is very busy and our time is precious. Every agenda item has an associated ticket on GitHub, and a clear set of action items should be added in GitHub Tasklist syntax to the first comment on the issue. If there are issues for any subtasks (e.g. PATO or Uberon edits), the list should be edited to link these. Any items that do not have a subissue but do involve changes to patterns) should be edited to link to implementing PR. It does not matter who wrote the first issue comment, the uPheno team can simply add a tasklist underneath the original comment and refine it over time. Tag all issues which need discussion with \"upheno call\" It must be clear from the task list what the uPheno team should be doing during the call (discuss, decide, review). For example, one item on the task list may read: \"uPheno team to decide on appropriate label for template\". Conversely, no issue should be added to the agenda that does not have a clear set of action items associated with it that should be addressed during the call . These actions may include making and documenting modelling decisions. Go through up to 10 issues on the uPheno issue tracker before each meeting to determine how to progress on them, and add action items. Only if they need to be discussed, add the \"upheno call\" label.","title":"Meeting preparation"},{"location":"organization/meetings/#meeting","text":"Every meeting should start with a quick (max 5 min, ideally 3 min) overview of all the goals and how they processed. The MC should mention all blockers and goals, even the ones we did not make any progress on, to keep the focus on the priorities: uPheno releases uPheno documentation Pattern creation Patternisation: The process of ensuring that phenotype ontologies are using uPheno conformant templates to define their phenotypes. Harmonisation: The process of ensuring that phenotype patterns are applied consistently across ontologies. For new pattern discussions: Every new pattern proposal should come with a new GitHub issue, appropriately tagged. The issue text should detail the use cases for the pattern well, and these use cases should also be documented in the \"description\" part of the DOSDP YAML file. 
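As a purely hypothetical illustration (the pattern name, file path and wording below are invented, not an actual uPheno pattern), recording those use cases in the pattern's description could look like this:

```sh
# Hypothetical example only: create a DOSDP pattern stub whose description records the use cases.
cat > src/patterns/dosdp-patterns/abnormalLengthOfDigit.yaml <<'EOF'
pattern_name: abnormalLengthOfDigit
description: >-
  Use cases: group species-specific digit length phenotypes under one
  species-independent class; expected classification: under abnormal digit
  morphology; classification to avoid: under behavioural phenotypes.
EOF
```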
Uses cases should include expected classifications and why we want them (and potentially classifications to avoid). e.g. axis-specific dimension traits should classify under more abstractly defined dimension traits which in term should classify under Morphology. Add some examples of contexts where grouping along these classifications is useful. Agenda items may include discussion and decisions about more general modelling issues that affect more than one pattern, but these should also be documented as tickets as described above.","title":"Meeting"},{"location":"organization/meetings/#after-the-meeting","text":"After every meeting, update all issues discussed on GitHub and, in particular, clarify the remaining action items. Ensure that the highest priority issues are discussed first.","title":"After the meeting"},{"location":"organization/outreach/","text":"The Outreach Programme of the Unified Phenotype Ontology (uPheno) development team Outreach-calls The uPheno organises an outreach call every four weeks to listen to external stakeholders describing their need for cross-species phenotype integration. Schedule Date Lesson Notes Recordings 2024/04/05 TBD TBD 2024/3/08 Computational identification of disease models through cross-species phenotype comparison Diego A. Pava, Pilar Cacheiro, Damian Smedley (IMPC) Recording 2024/02/09 Use cases for uPheno in the Alliance of Genome Resources and MGI Sue Bello (Alliance of Genome Resources, MGI) Recording Possible topics Cross-species inference in Variant and Gene Prioritisation algorithms (Exomiser). Cross-species comparison of phenotypic profiles (Monarch Initiative Knowledge Graph) Cross-species data in biomedical knowledge graphs (Kids First)","title":"Outreach"},{"location":"organization/outreach/#the-outreach-programme-of-the-unified-phenotype-ontology-upheno-development-team","text":"","title":"The Outreach Programme of the Unified Phenotype Ontology (uPheno) development team"},{"location":"organization/outreach/#outreach-calls","text":"The uPheno organises an outreach call every four weeks to listen to external stakeholders describing their need for cross-species phenotype integration.","title":"Outreach-calls"},{"location":"organization/outreach/#schedule","text":"Date Lesson Notes Recordings 2024/04/05 TBD TBD 2024/3/08 Computational identification of disease models through cross-species phenotype comparison Diego A. Pava, Pilar Cacheiro, Damian Smedley (IMPC) Recording 2024/02/09 Use cases for uPheno in the Alliance of Genome Resources and MGI Sue Bello (Alliance of Genome Resources, MGI) Recording","title":"Schedule"},{"location":"organization/outreach/#possible-topics","text":"Cross-species inference in Variant and Gene Prioritisation algorithms (Exomiser). Cross-species comparison of phenotypic profiles (Monarch Initiative Knowledge Graph) Cross-species data in biomedical knowledge graphs (Kids First)","title":"Possible topics"},{"location":"reference/components/","text":"","title":"Overview"},{"location":"reference/core_concepts/","text":"Traits and phenotypes - the Conceptual model Overview Table of contents General characteristic Bearer Biological attributes Measurement Phenotypic change Disease General characteristics \"Characteristics\" or \"qualities\" refer to an inherent or distinguishing characteristic or attribute of something or someone. It represents a feature that defines the nature of an object, organism, or entity and can be used to describe, compare, and categorize different things. 
Characteristics can be either qualitative (such as color, texture, or taste) or quantitative (such as height, weight, or age). The Phenotype And Trait Ontology (PATO) is the reference ontology for general characteristics in the OBO world. Some of the most widely used characteristics can be seen in the following table: quality description example Length ( PATO:0000122 ) A 1-D extent quality which is equal to the distance between two points. Mass ( PATO:0000128 ) A physical quality that inheres in a bearer by virtue of the proportion of the bearer's amount of matter. Amount ( PATO:0000070 ) The number of entities of a type that are part of the whole organism. Morphology ( PATO:0000051 ) A quality of a single physical entity inhering in the bearer by virtue of the bearer's size or shape or structure. Note from the authors: The descriptions above have been taken from PATO, but they are not very user-friendly. Biological Trait/Characteristics/Attribute Characteristics such as the ones above can be used to describe a variety of entities such as biological, environmental and social. We are specifically concerned with biological traits, which are characteristics that refer to an inherent characteristic of a biological entity, such as an organ (the heart), a process (cell division), or a chemical entity (lysine) in the blood. The Ontology of Biological Attributes (OBA) is the reference ontology for biological characteristics in the OBO world. There are a few other ontologies that describe biological traits, such as the Vertebrate Phenotype Ontology and the Ascomycete Phenotype Ontology (APO) , but these are more species-specific, and, more importantly, are not integrated in the wider EQ modelling framework . Property Example term Definition Length OBA:VT0002544 The length of a digit. Mass OBA:VT0001259 The mass of a multicellular organism. Level OBA:2020005 The amount of lysine in blood. Morphology OBA:VT0005406 The size of a heart. Bearer of Biological Characteristics In biological contexts, the term \"bearer\" refers to the entity that possesses or carries a particular characteristic or quality. The bearer can be any biological entity, such as an organism, an organ, a cell, or even a molecular structure, that exhibits a specific trait or feature. Some examples: Organism as a Bearer: Example: A specific tree (such as an oak tree) is the bearer of the characteristic 'height'. Explanation: The tree as an organism carries or has the property of height, making it the bearer of this characteristic. Organ as a Bearer: Example: The heart of a mammal can be the bearer of the characteristic 'heart size'. Explanation: Here, the heart is the organ that possesses the 'heart size' characteristic. The characteristic ('heart size') is a quality of the heart itself. Cell as a Bearer: Example: A red blood cell is the bearer of the characteristic 'cell diameter'. Explanation: The diameter is a property of the individual cell. Thus, each red blood cell is the bearer of its diameter measurement. Molecular Structure as a Bearer: Example: A DNA molecule can be the bearer of the characteristic 'sequence length'. Explanation: The length of the DNA sequence is a property of the DNA molecule itself, making the molecule the bearer of this characteristic. Genetic Trait as a Bearer: Example: A fruit fly (Drosophila melanogaster) can be the bearer of a genetic trait like eye color. Explanation: The organism (fruit fly) carries the genetic information that determines eye color, making it the bearer of this specific trait.
In each example, the \"bearer\" is the entity that has, carries, or exhibits a particular biological characteristic. This concept is fundamental in biology and bioinformatics for linking specific traits, qualities, or features to the entities that possess them, thereby enabling a clearer understanding and categorization of biological diversity and functions. Phenotypic change A phenotypic change refers to some deviation from reference morphology, physiology, or behavior. This is the most widely used, and most complicated category of phenotype terms for data specialists to understand. Conceptually, a phenotypic abnormality comprises: a biological attribute (which includes a biological bearer) an \"change\" modifier (optionally) a directional modifier (increased / decreased) a comparator Biological attributes such as blood lysine amount (OBA:2020005) have been discussed earlier in this document . The most widely used change modifier used in practice is abnormal (PATO:0000460). This modifier signifies that the phenotypic change term describes a deviation that is abnormal, such as \"Hyperlysinemia\" (HP:0002161), which describes and increased concentration of lysine in the blood. Other modifiers include normal (PATO:0000461), which describes a change within in the normal range (sometimes interpreted as \"no change\"). A directional modifier like increased (PATO:0040043) or decreased (PATO:0040042). In practice, most of our \"characteristic\" terms have specialised directional variants such as decreased amount (PATO:0001997) which can be used to describe phenotypes. Comparators are the most confusing aspects of phenotypic change. The first question someone has to ask when they see a concept describing is change like increased blood lysine levels is \"compared to what?\". Depending on biological context, the assumed comparators vary widely. For example, in clinical phenotyping, it is mostly assumed that a phenotypic feature corresponds to a deviation from the normal range, see HPO docs . Nature of \"comparators\" in the notion of a phenotypic abnormality. In database curation you are effectively de-contextualising the phenotype term, which means you loose the original comparator. normal changed wildtype comparator The Unified Phenotype Ontology (uPheno) is the reference ontology for biological abnormalities in the OBO world. There are a many species-specific ontologies in the OBO world, such as the Mammalian Phenotype Ontology (MP), the Human Phenotype Ontology (HPO) and the Drosophila Phenotype Ontology (DPO), see here . Property Example term Definition Length UPHENO:0072215 Increased length of the digit. Mass UPHENO:0054299 Decreased multicellular organism mass. Level UPHENO:0034327 Decreased level of lysine in blood. Morphology UPHENO:0001471 Increased size of the heart. Concepts that are related and often confused with phenotype terms Disease Measurements In biological data curation, it\u2019s essential to differentiate between measurements and traits. Measurements, such as \u201cblood glucose amount,\u201d are quantitative indicators, providing numerical values. In contrast, traits, like \u201cHyperglycemia,\u201d encompass both qualitative and quantitative characteristics, representing broader phenotypic states. This difference is crucial in ontology modeling, where measurements are directly linked to specific values, while traits reflect more comprehensive biological attributes. 
For example, \u201cbody temperature\u201d is a measurement, whereas \u201cFever\u201d represents a trait associated with elevated temperatures. Understanding this contrast is fundamental for accurate data representation and interpretation, ensuring nuanced understanding of biological entities and phenotypic variability.","title":"Core concepts"},{"location":"reference/core_concepts/#traits-and-phenotypes-the-conceptual-model","text":"","title":"Traits and phenotypes - the Conceptual model"},{"location":"reference/core_concepts/#overview","text":"","title":"Overview"},{"location":"reference/core_concepts/#table-of-contents","text":"General characteristic Bearer Biological attributes Measurement Phenotypic change Disease","title":"Table of contents"},{"location":"reference/core_concepts/#general-characteristics","text":"\"Characteristics\" or \"qualities\" refer to an inherent or distinguishing characteristic or attribute of something or someone. It represents a feature that defines the nature of an object, organism, or entity and can be used to describe, compare, and categorize different things. Characteristics can be either qualitative (such as color, texture, or taste) or quantitative (such as height, weight, or age). The Phenotype And Trait Ontology (PATO) is the reference ontology for general characteristics in the OBO world. Some of the most widely use characteristics can be seen in the following tables quality description example Length ( PATO:0000122 ) A 1-D extent quality which is equal to the distance between two points. Mass ( PATO:0000128 ) A physical quality that inheres in a bearer by virtue of the proportion of the bearer's amount of matter. Amount ( PATO:0000070 ) The number of entities of a type that are part of the whole organism. Morphology ( PATO:0000051 ) A quality of a single physical entity inhering in the bearer by virtue of the bearer's size or shape or structure. Note from the authors: The descriptions above have been taken from PATO, but they are not very.. user friendly.","title":"General characteristics"},{"location":"reference/core_concepts/#biological-traitcharacteristicsattribute","text":"Characteristics such as the one above can be used to describe a variety of entities such as biological, environmental and social. We are specifically concerned with biological traits, which are characteristics that refer to an inherent characteristic of a biological entity, such as an organ (the heart), a process (cell division), a chemical entity (lysine) in the blood. The Ontology of Biological Attributes (OBA) is the reference ontology for biological characteristics in the OBO world. There are a few other ontologies that describe biological traits, such as the Vertebrate Phenotype Ontology and the Ascomycete Phenotype Ontology (APO) , but these are more species specific, and, more importantly, are not integrated in the wider EQ modelling framework . Property Example term Definition Length OBA:VT0002544 The length of a digit. Mass OBA:VT0001259 The mass of a multicellular organism. Level OBA:2020005 The amount of lysine in blood. Morphology OBA:VT0005406 The size of a heart.","title":"Biological Trait/Characteristics/Attribute"},{"location":"reference/core_concepts/#bearer-of-biological-characteristics","text":"In biological contexts, the term \"bearer\" refers to the entity that possesses or carries a particular characteristic or quality. 
The bearer can be any biological entity, such as an organism, an organ, a cell, or even a molecular structure, that exhibits a specific trait or feature. Some examples: Organism as a Bearer: Example: A specific tree (such as an oak tree) is the bearer of the characteristic 'height'. Explanation: The tree as an organism carries or has the property of height, making it the bearer of this characteristic. Organ as a Bearer: Example: The heart of a mammal can be the bearer of the characteristic 'heart size'. Explanation: Here, the heart is the organ that possesses the 'heart size' characteristic. The characteristic ('heart size') is a quality of the heart itself. Cell as a Bearer: Example: A red blood cell is the bearer of the characteristic 'cell diameter'. Explanation: The diameter is a property of the individual cell. Thus, each red blood cell is the bearer of its diameter measurement. Molecular Structure as a Bearer: Example: A DNA molecule can be the bearer of the characteristic 'sequence length'. Explanation: The length of the DNA sequence is a property of the DNA molecule itself, making the molecule the bearer of this characteristic. Genetic Trait as a Bearer: Example: A fruit fly (Drosophila melanogaster) can be the bearer of a genetic trait like eye color. Explanation: The organism (fruit fly) carries the genetic information that determines eye color, making it the bearer of this specific trait. In each example, the \"bearer\" is the entity that has, carries, or exhibits a particular biological characteristic. This concept is fundamental in biology and bioinformatics for linking specific traits, qualities, or features to the entities that possess them, thereby enabling a clearer understanding and categorization of biological diversity and functions.","title":"Bearer of Biological Characteristics"},{"location":"reference/core_concepts/#phenotypic-change","text":"A phenotypic change refers to some deviation from reference morphology, physiology, or behavior. This is the most widely used, and most complicated, category of phenotype terms for data specialists to understand. Conceptually, a phenotypic abnormality comprises: a biological attribute (which includes a biological bearer) a \"change\" modifier (optionally) a directional modifier (increased / decreased) a comparator Biological attributes such as blood lysine amount (OBA:2020005) have been discussed earlier in this document . The change modifier most widely used in practice is abnormal (PATO:0000460). This modifier signifies that the phenotypic change term describes a deviation that is abnormal, such as \"Hyperlysinemia\" (HP:0002161), which describes an increased concentration of lysine in the blood. Other modifiers include normal (PATO:0000461), which describes a change within the normal range (sometimes interpreted as \"no change\"). A directional modifier like increased (PATO:0040043) or decreased (PATO:0040042). In practice, most of our \"characteristic\" terms have specialised directional variants such as decreased amount (PATO:0001997) which can be used to describe phenotypes. Comparators are the most confusing aspects of phenotypic change. The first question someone has to ask when they see a concept describing a change like increased blood lysine levels is \"compared to what?\". Depending on biological context, the assumed comparators vary widely. For example, in clinical phenotyping, it is mostly assumed that a phenotypic feature corresponds to a deviation from the normal range, see HPO docs .
Nature of \"comparators\" in the notion of a phenotypic abnormality. In database curation you are effectively de-contextualising the phenotype term, which means you loose the original comparator. normal changed wildtype comparator The Unified Phenotype Ontology (uPheno) is the reference ontology for biological abnormalities in the OBO world. There are a many species-specific ontologies in the OBO world, such as the Mammalian Phenotype Ontology (MP), the Human Phenotype Ontology (HPO) and the Drosophila Phenotype Ontology (DPO), see here . Property Example term Definition Length UPHENO:0072215 Increased length of the digit. Mass UPHENO:0054299 Decreased multicellular organism mass. Level UPHENO:0034327 Decreased level of lysine in blood. Morphology UPHENO:0001471 Increased size of the heart.","title":"Phenotypic change"},{"location":"reference/core_concepts/#concepts-that-are-related-and-often-confused-with-phenotype-terms","text":"","title":"Concepts that are related and often confused with phenotype terms"},{"location":"reference/core_concepts/#disease","text":"","title":"Disease"},{"location":"reference/core_concepts/#measurements","text":"In biological data curation, it\u2019s essential to differentiate between measurements and traits. Measurements, such as \u201cblood glucose amount,\u201d are quantitative indicators, providing numerical values. In contrast, traits, like \u201cHyperglycemia,\u201d encompass both qualitative and quantitative characteristics, representing broader phenotypic states. This difference is crucial in ontology modeling, where measurements are directly linked to specific values, while traits reflect more comprehensive biological attributes. For example, \u201cbody temperature\u201d is a measurement, whereas \u201cFever\u201d represents a trait associated with elevated temperatures. Understanding this contrast is fundamental for accurate data representation and interpretation, ensuring nuanced understanding of biological entities and phenotypic variability.","title":"Measurements"},{"location":"reference/eq/","text":"","title":"Overview of EQ modelling"},{"location":"reference/imports/","text":"labels Featured Imported ontologies Introduction Imports directory: * http://purl.obolibrary.org/obo/upheno/imports/ Currently the imports includes: * imports/chebi_import.owl \\ * imports/doid_import.owl \\ * imports/go_import.owl \\ * imports/mpath_import.owl \\ * imports/pato_import.owl \\ * imports/pr_import.owl \\ * imports/uberon_import.owl \\ * imports/wbbt_import.owl Anatomy To avoid multiple duplicate classes for heart, lung, skin etc we map all classes to [Uberon] where this is applicable. For more divergent species such as fly and C elegans we use the appropriate species-specific ontology. Currently there are a small number of highly specific classes in FMA that are being used and have no corresponding class in Uberon Methods We use the OWLAPI SyntacticLocalityModularityExtractor, via [OWLTools]. 
See the http://purl.obolibrary.org/obo/upheno/Makefile for details","title":"Overview"},{"location":"reference/imports/#introduction","text":"Imports directory: * http://purl.obolibrary.org/obo/upheno/imports/ Currently the imports include: * imports/chebi_import.owl \\ * imports/doid_import.owl \\ * imports/go_import.owl \\ * imports/mpath_import.owl \\ * imports/pato_import.owl \\ * imports/pr_import.owl \\ * imports/uberon_import.owl \\ * imports/wbbt_import.owl","title":"Introduction"},{"location":"reference/imports/#anatomy","text":"To avoid multiple duplicate classes for heart, lung, skin etc. we map all classes to [Uberon] where this is applicable. For more divergent species such as fly and C. elegans we use the appropriate species-specific ontology. Currently there are a small number of highly specific classes in FMA that are being used and have no corresponding class in Uberon.","title":"Anatomy"},{"location":"reference/imports/#methods","text":"We use the OWLAPI SyntacticLocalityModularityExtractor, via [OWLTools]. See the http://purl.obolibrary.org/obo/upheno/Makefile for details","title":"Methods"},{"location":"reference/mappings/","text":"","title":"Overview"},{"location":"reference/patterns/","text":"","title":"Design Patterns Overview"},{"location":"reference/phenotype-ontology-alignment/","text":"Aligning species-specific phenotype ontologies Phenotype ontologies use different reference ontologies for their EQs. Everything in uPheno is integrated towards a common set of reference ontologies, in particular Uberon and CL. In order to align phenotype ontologies with species-independent anatomy ontologies, we employ the following workflow: Create a base-plus module from the ontology Rename all Uberon-aligned entities using ROBOT rename. This essentially replaces species-specific anatomy references with Uberon anatomy references. Delete all species-specific references from uPheno (FBBT, XAO, ZFA, etc). This also deletes all EQs which have non-Uberon references.
For all remaining species-specific anatomy terms, we retain only the link to the nearest Uberon term.","title":"Aliging species specific phenotype ontologies"},{"location":"reference/phenotype-ontology-alignment/#rules-for-phenotype-ontologies-to-be-integrated","text":"Every phenotype ontology must export a base module at the proper PURL location Every phenotype ontology must export a upheno export module at the proper PURL location When two classes are merged in uPheno based on a cross-species mapping, we assert the most general common ancestor as parent.","title":"Rules for phenotype ontologies to be integrated"},{"location":"reference/qc/","text":"uPheno Quality Control","title":"Overview"},{"location":"reference/qc/#upheno-quality-control","text":"","title":"uPheno Quality Control"},{"location":"reference/reconciliation_effort/","text":"","title":"The Phenotype Reconciliation Effort"},{"location":"reference/use_cases/","text":"Use Cases Computational identification of disease models through cross-species phenotype comparison Use cases for uPheno in the Alliance of Genome Resources and MGI Cross-species inference in Variant and Gene Prioritisation algorithms (Exomiser). Cross-species comparison of phenotypic profiles (Monarch Initiative Knowledge Graph) Cross-species data in biomedical knowledge graphs (Kids First)","title":"Use Cases"},{"location":"reference/use_cases/#use-cases","text":"Computational identification of disease models through cross-species phenotype comparison Use cases for uPheno in the Alliance of Genome Resources and MGI Cross-species inference in Variant and Gene Prioritisation algorithms (Exomiser). Cross-species comparison of phenotypic profiles (Monarch Initiative Knowledge Graph) Cross-species data in biomedical knowledge graphs (Kids First)","title":"Use Cases"},{"location":"reference/components/dpo/","text":"summary Drosophila Phenotype Ontology * The Drosophila phenotype ontology Osumi-Sutherland et al, J Biomed Sem. The DPO is formally a subset of FBcv, made available from http://purl.obolibrary.org/obo/fbcv/dpo.owl Phenotypes in FlyBase may either by assigned to FBcv (dpo) classes, or they may have a phenotype_manifest_in to FBbt (anatomy). For integration we generate the following ontologies: * http://purl.obolibrary.org/obo/upheno/imports/fbbt_phenotype.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/uberon_phenotype.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/go_phenotype.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/cl_phenotype.owl (see Makefile) This includes a phenotype class for every anatomy class - the IRI is suffixed with \"PHENOTYPE\". Using these ontologies, Uberon and CL phenotypes make the groupings. We include * http://purl.obolibrary.org/obo/upheno/dpo/dpo-importer.owl Which imports dpo plus auto-generated fbbt phenotypes. The dpo-importer is included in the [MetazoanImporter] Additional Notes We create a local copy of fbbt that has \"Drosophila \" prefixed to all labels. This gives us a hierarchy: * eye phenotype (defined using Uberon) \\ * compound eye phenotype (defined using Uberon) \\ * drosophila eye phenotype (defined using FBbt) TODO * http://code.google.com/p/cell-ontology/issues/detail?id=115 ensure all CL to FBbt equiv axioms are present (we have good coverage for Uberon)","title":"Drosophila Phenotype Ontology"},{"location":"reference/components/dpo/#additional-notes","text":"We create a local copy of fbbt that has \"Drosophila \" prefixed to all labels. 
This gives us a hierarchy: * eye phenotype (defined using Uberon) \\ * compound eye phenotype (defined using Uberon) \\ * drosophila eye phenotype (defined using FBbt)","title":"Additional Notes"},{"location":"reference/components/dpo/#todo","text":"* http://code.google.com/p/cell-ontology/issues/detail?id=115 ensure all CL to FBbt equiv axioms are present (we have good coverage for Uberon)","title":"TODO"},{"location":"reference/components/fypo/","text":"summary Fission Yeast Phenotype Ontology * project page - https://sourceforge.net/apps/trac/pombase/wiki/FissionYeastPhenotypeOntology \\ * FYPO: the fission yeast phenotype ontology Harris et al, Bioinformatics Note that the OWL axioms for FYPO are managed directly in the FYPO project repo, we do not duplicate them here","title":"Fission Yeast Phenotype Ontology"},{"location":"reference/components/hp/","text":"summary Human Phenotype Ontology labels Featured Links * http://www.human-phenotype-ontology.org/ \\ * K\u00f6hler S, Doelken SC, Mungall CJ, Bauer S, Firth HV, Bailleul-Forestier I, Black GC, Brown DL, Brudno M, Campbell J, FitzPatrick DR, Eppig JT, Jackson AP, Freson K, Girdea M, Helbig I, Hurst JA, J\u00e4hn J, Jackson LG, Kelly AM, Ledbetter DH, Mansour S, Martin CL, Moss C, Mumford A, Ouwehand WH, Park SM, Riggs ER, Scott RH, Sisodiya S, Van Vooren S, Wapner RJ, Wilkie AO, Wright CF, Vulto-van Silfhout AT, de Leeuw N, de Vries BB, Washingthon NL, Smith CL, Westerfield M, Schofield P, Ruef BJ, Gkoutos GV, Haendel M, Smedley D, Lewis SE, Robinson PN. The Human Phenotype Ontology project: linking molecular biology and disease through phenotype data. Nucleic Acids Res. 2014 Jan; 42 (Database issue):D966-74 [ pubmed ] * HPO browser \\ * HP in OntoBee \\ * HP in OLSVis OWL Axiomatization The OWL axioms for HP are in the src/ontology/hp directory on this site. The structure is analagous to that of the [MP]. Status The OWL axiomatization is updated frequently to stay in sync with changes in the MP Editing the axioms The edit file is currently: * http://purl.obolibrary.org/obo/hp/hp-equivalence-axioms-subq-ubr.owl Edit this in protege.","title":"Human Phenotype Ontology"},{"location":"reference/components/hp/#links","text":"* http://www.human-phenotype-ontology.org/ \\ * K\u00f6hler S, Doelken SC, Mungall CJ, Bauer S, Firth HV, Bailleul-Forestier I, Black GC, Brown DL, Brudno M, Campbell J, FitzPatrick DR, Eppig JT, Jackson AP, Freson K, Girdea M, Helbig I, Hurst JA, J\u00e4hn J, Jackson LG, Kelly AM, Ledbetter DH, Mansour S, Martin CL, Moss C, Mumford A, Ouwehand WH, Park SM, Riggs ER, Scott RH, Sisodiya S, Van Vooren S, Wapner RJ, Wilkie AO, Wright CF, Vulto-van Silfhout AT, de Leeuw N, de Vries BB, Washingthon NL, Smith CL, Westerfield M, Schofield P, Ruef BJ, Gkoutos GV, Haendel M, Smedley D, Lewis SE, Robinson PN. The Human Phenotype Ontology project: linking molecular biology and disease through phenotype data. Nucleic Acids Res. 2014 Jan; 42 (Database issue):D966-74 [ pubmed ] * HPO browser \\ * HP in OntoBee \\ * HP in OLSVis","title":"Links"},{"location":"reference/components/hp/#owl-axiomatization","text":"The OWL axioms for HP are in the src/ontology/hp directory on this site. 
The structure is analagous to that of the [MP].","title":"OWL Axiomatization"},{"location":"reference/components/hp/#status","text":"The OWL axiomatization is updated frequently to stay in sync with changes in the MP","title":"Status"},{"location":"reference/components/hp/#editing-the-axioms","text":"The edit file is currently: * http://purl.obolibrary.org/obo/hp/hp-equivalence-axioms-subq-ubr.owl Edit this in protege.","title":"Editing the axioms"},{"location":"reference/components/mp/","text":"summary Mouse Phenotype Ontology labels Featured Links * The Mammalian Phenotype Ontology: enabling robust annotation and comparative analysis Smith CL, Eppig JT \\ * MP browser at MGI \\ * MP in OntoBee \\ * MP in OLSVis OWL Axiomatization The OWL axioms for MP are in the src/ontology/mp directory on this site. * http://purl.obolibrary.org/obo/mp.owl - direct conversion of MGI-supplied obo file \\ * http://purl.obolibrary.org/obo/mp/mp-importer.owl - imports additional axioms, including the following ones below: \\ * http://purl.obolibrary.org/obo/mp.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/chebi_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/uberon_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/pato_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/go_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/mpath_import.owl \\ * http://purl.obolibrary.org/obo/mp/mp-equivalence-axioms-subq-ubr.owl \\ \\ Status The OWL axiomatization is updated frequently to stay in sync with changes in the MP Editing the axioms The edit file is currently: * http://purl.obolibrary.org/obo/mp/mp-equivalence-axioms-edit.owl Edit this in protege. The file mp-equivalence-axioms.obo is DEPRECATED! TermGenie * http://mp.termgenie.org/ \\ * http://mp.termgenie.org/TermGenieFreeForm","title":"Mammalian Phenotype Ontology"},{"location":"reference/components/mp/#links","text":"* The Mammalian Phenotype Ontology: enabling robust annotation and comparative analysis Smith CL, Eppig JT \\ * MP browser at MGI \\ * MP in OntoBee \\ * MP in OLSVis","title":"Links"},{"location":"reference/components/mp/#owl-axiomatization","text":"The OWL axioms for MP are in the src/ontology/mp directory on this site. * http://purl.obolibrary.org/obo/mp.owl - direct conversion of MGI-supplied obo file \\ * http://purl.obolibrary.org/obo/mp/mp-importer.owl - imports additional axioms, including the following ones below: \\ * http://purl.obolibrary.org/obo/mp.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/chebi_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/uberon_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/pato_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/go_import.owl \\ * http://purl.obolibrary.org/obo/upheno/imports/mpath_import.owl \\ * http://purl.obolibrary.org/obo/mp/mp-equivalence-axioms-subq-ubr.owl \\ \\","title":"OWL Axiomatization"},{"location":"reference/components/mp/#status","text":"The OWL axiomatization is updated frequently to stay in sync with changes in the MP","title":"Status"},{"location":"reference/components/mp/#editing-the-axioms","text":"The edit file is currently: * http://purl.obolibrary.org/obo/mp/mp-equivalence-axioms-edit.owl Edit this in protege. 
The file mp-equivalence-axioms.obo is DEPRECATED!","title":"Editing the axioms"},{"location":"reference/components/mp/#termgenie","text":"* http://mp.termgenie.org/ \\ * http://mp.termgenie.org/TermGenieFreeForm","title":"TermGenie"},{"location":"reference/components/wbphenotype/","text":"summary Worm Phenotype Ontology labels Featured Links * Schindelman, Gary, et al. Worm Phenotype Ontology: integrating phenotype data within and beyond the C. elegans community. BMC bioinformatics 12.1 (2011): 32. \\ * WBPhenotype in OntoBee \\ * WBPhenotype in OLSVis OWL Axiomatization The OWL axioms for WBPhenotype are in the src/ontology/wbphenotype directory on this site. * http://purl.obolibrary.org/obo/wbphenotype.owl - direct conversion of WormBase-supplied obo file \\ * http://purl.obolibrary.org/obo/wbphenotype/wbphenotype-importer.owl - imports additional axioms. The structure roughly follows that of the [MP]. The worm anatomy is used. Editing the axioms Currently the source is wbphenotype/wbphenotype-equivalence-axioms.obo, the OWL is generated from here. We are considering switching this around, so the OWL is edited, using Protege.","title":"C. elegans Phenotype Ontology"},{"location":"reference/components/wbphenotype/#links","text":"* Schindelman, Gary, et al. Worm Phenotype Ontology: integrating phenotype data within and beyond the C. elegans community. BMC bioinformatics 12.1 (2011): 32. \\ * WBPhenotype in OntoBee \\ * WBPhenotype in OLSVis","title":"Links"},{"location":"reference/components/wbphenotype/#owl-axiomatization","text":"The OWL axioms for WBPhenotype are in the src/ontology/wbphenotype directory on this site. * http://purl.obolibrary.org/obo/wbphenotype.owl - direct conversion of WormBase-supplied obo file \\ * http://purl.obolibrary.org/obo/wbphenotype/wbphenotype-importer.owl - imports additional axioms. The structure roughly follows that of the [MP]. The worm anatomy is used.","title":"OWL Axiomatization"},{"location":"reference/components/wbphenotype/#editing-the-axioms","text":"Currently the source is wbphenotype/wbphenotype-equivalence-axioms.obo, the OWL is generated from here. We are considering switching this around, so the OWL is edited, using Protege.","title":"Editing the axioms"},{"location":"reference/components/zp/","text":"Introduction This page describes the generation of the zebrafish phenotype ontology Details The ZP differs considerably from [HP], [MP] and others. ZFIN do not annotate with a pre-composed phenotype ontology - all annotations compose phenotypes on-the-fly using a combination of PATO, ZFA, GO and other ontologies. We use these combinations to construct ZP on the fly, by naming each distinct combination, assigning it an ID, and placing it in the hierarchy. 
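To make the idea concrete (this is purely illustrative of the principle; the real pipeline is the Java implementation linked below, which mints stable ZP identifiers), each distinct EQ combination used in the ZFIN annotation data becomes exactly one named ZP class:

```sh
# Purely illustrative: count the distinct EQ combinations in a hypothetical ZFIN annotation
# dump (the file name and the entity/quality column positions are assumptions).
# Each distinct combination corresponds to one generated ZP class.
cut -f 2-4 zfin_phenotype_annotations.tsv | sort -u | wc -l
```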
The process is described here: Sebastian K\u00f6hler, Sandra C Doelken, Barbara J Ruef, Sebastian Bauer, Nicole Washington, Monte Westerfield, George Gkoutos, Paul Schofield, Damian Smedley, Suzanna E Lewis, Peter N Robinson, Christopher J Mungall (2013) Construction and accessibility of a cross-species phenotype ontology along with gene annotations for biomedical research F1000Research The OWL formalism for ZFIN annotations is described here: [ https://docs.google.com/document/d/1Vbokc9aFHR4awNE6DrrLtgpE6axeTS4VEfxqDHsWyPQ/edit # Mapping ZFIN phenotypes to OWL] The Java implementation is here: https://github.com/sba1/bio-ontology-zp OWL Axiomatization The OWL axioms for ZP are in zp.owl, which is built on our Hudson server.","title":"Zebrafish Phenotype Ontology"},{"location":"reference/components/zp/#introduction","text":"This page describes the generation of the zebrafish phenotype ontology","title":"Introduction"},{"location":"reference/components/zp/#details","text":"The ZP differs considerably from [HP], [MP] and others. ZFIN do not annotate with a pre-composed phenotype ontology - all annotations compose phenotypes on-the-fly using a combination of PATO, ZFA, GO and other ontologies. We use these combinations to construct ZP on the fly, by naming each distinct combination, assigning it an ID, and placing it in the hierarchy. The process is described here: Sebastian K\u00f6hler, Sandra C Doelken, Barbara J Ruef, Sebastian Bauer, Nicole Washington, Monte Westerfield, George Gkoutos, Paul Schofield, Damian Smedley, Suzanna E Lewis, Peter N Robinson, Christopher J Mungall (2013) Construction and accessibility of a cross-species phenotype ontology along with gene annotations for biomedical research F1000Research The OWL formalism for ZFIN annotations is described here: [ https://docs.google.com/document/d/1Vbokc9aFHR4awNE6DrrLtgpE6axeTS4VEfxqDHsWyPQ/edit # Mapping ZFIN phenotypes to OWL] The Java implementation is here: https://github.com/sba1/bio-ontology-zp","title":"Details"},{"location":"reference/components/zp/#owl-axiomatization","text":"The OWL axioms for ZP are in zp.owl, which is built on our Hudson server.","title":"OWL Axiomatization"},{"location":"reference/imports/go/","text":"","title":"Gene Ontology"},{"location":"reference/imports/pato/","text":"summary PATO Introduction PATO is an ontology of phenotypic qualities. We use PATO to compose phenotypic descriptions. See [OWLAxiomatization] Details See https://code.google.com/p/pato/","title":"PATO"},{"location":"reference/imports/pato/#introduction","text":"PATO is an ontology of phenotypic qualities. We use PATO to compose phenotypic descriptions. See [OWLAxiomatization]","title":"Introduction"},{"location":"reference/imports/pato/#details","text":"See https://code.google.com/p/pato/","title":"Details"},{"location":"reference/imports/uberon/","text":"","title":"Uberon"},{"location":"reference/mappings/mp_hp/","text":"","title":"MP-HP"},{"location":"reference/modelling/abnormal/","text":"summary How inference of abnormality works Introduction The current design patterns are such that the abnormal qualifier is only added when the quality class in the definition is neutral. 
However, we still need to be able to infer * Hypoplasia of right ventricle SubClassOf Abnormality of right ventricle Because the latter class definition includes qualifier some abnormal, the SubClassOf axiom will not be entailed unless the qualifier is explicitly stated or inferred Details We achieve this by adding an axiom to PATO such that decreased sizes etc are inferred to be qualifier some abnormal. We do this with an axiom in imports/extra.owl * 'deviation(from normal)' SubClassOf qualifier some abnormal Anything under 'increased', 'decreased' etc in PATO is pre-reasoned in PATO to be here. See the following explanation: http://phenotype-ontologies.googlecode.com/svn/trunk/doc/images/has-qualifier-inference.png Limitations For this strategy to work it requires the PATO classes themselves to be classified under deviation from normal. This may not always be the case Notes Do not be distracted by the fact the has-qualifier relation is named has-component at the moment https://code.google.com/p/phenotype-ontologies/issues/detail?id=45 Notes","title":"Abnormal phenotypes"},{"location":"reference/modelling/abnormal/#introduction","text":"The current design patterns are such that the abnormal qualifier is only added when the quality class in the definition is neutral. However, we still need to be able to infer * Hypoplasia of right ventricle SubClassOf Abnormality of right ventricle Because the latter class definition includes qualifier some abnormal, the SubClassOf axiom will not be entailed unless the qualifier is explicitly stated or inferred","title":"Introduction"},{"location":"reference/modelling/abnormal/#details","text":"We achieve this by adding an axiom to PATO such that decreased sizes etc are inferred to be qualifier some abnormal. We do this with an axiom in imports/extra.owl * 'deviation(from normal)' SubClassOf qualifier some abnormal Anything under 'increased', 'decreased' etc in PATO is pre-reasoned in PATO to be here. See the following explanation: http://phenotype-ontologies.googlecode.com/svn/trunk/doc/images/has-qualifier-inference.png","title":"Details"},{"location":"reference/modelling/abnormal/#limitations","text":"For this strategy to work it requires the PATO classes themselves to be classified under deviation from normal. This may not always be the case","title":"Limitations"},{"location":"reference/modelling/abnormal/#notes","text":"Do not be distracted by the fact the has-qualifier relation is named has-component at the moment https://code.google.com/p/phenotype-ontologies/issues/detail?id=45","title":"Notes"},{"location":"reference/modelling/abnormal/#notes_1","text":"","title":"Notes"},{"location":"reference/modelling/absence/","text":"summary Discussion of issues pertaining to modeling of absence in phenotype ontologies Introduction Much has been written on the subject of representing absence. Before diving into the logical issues it is worth examining patterns in existing phenotype ontologies to understand what user expectations may typically be for absence. Background * Absence_Phenotypes_in_OWL (Phenoscape Wiki) \\ * (outdated) material on the old PATO wiki . Details Strict logical absence vs absence of some It is not uncommon to see patterns such as From a strict logical perspective, this is inverted. \"absent incisors\" surely means \"absence of all incisors\", or put another way \"the animal has no incisors\". 
Yet it would be possible to have an animal with *absent* lower incisors and *present* upper incisors, yielding what seems a contradiction (because the subClass axiom would say this partial-incisor animal lacked all incisors). If the ontology were in fact truly modeling \"absence of *all* S\" then it would lead to a curious ontology structure, with the typical tree structure of the anatomy ontology representing S inverted into a polyhierarchical fan in the absent-S ontology. From this it can be cautiously inferred that the intent of the phenotype ontology curator and user is in fact to model \"absence of *some* S\" rather than \"absence of *all* S\". This is not necessarily a universal rule, and the intent may vary depending on whether we are talking about a serially repeated structure or one that typically occurs in isolation. The intent may also be to communicate that a *significant number* of S is missing. Absence as a type of morphology It is also not uncommon to see patterns such as: Again, from a strict logical perspective this is false. If the spleen is absent then what does the \"morphology\" of the parent refer to? However, this inference is clearly a desirable one from the point of view of the phenotype ontology editors and users, as it is common in ontologies for a variety of structures. For example: And: These patterns can be formally defended on developmental biology grounds. \"absence\" here is _not_ equivalent to logical absence. It refers specifically to developmental absence. Furthermore, strict logical absence leads to undesirable inferences. It would be odd to include a nematode worm as having the phenotype \"spleen absent\", because worms have not evolved spleens. But the logical description of not having a spleen as part fits a worm. Similarly, if the strict cardinality interpretation were intended, we would expect to see: i.e. if you're missing your entire hindlegs, you're *necessarily* missing your femurs. But it must be emphasized that this is *not* how phenotype ontologies are classified. This goes for a wide range of structures and other relationship types. In MP, \"absent limb buds\" are *not* classified under \"absent limbs\", even though it is impossible for a mammal to have limbs without having had limb buds. Absence as part of a size-morphology spectrum The existing treatment of absence can be formally defended morphologically by conceiving of a morphological value space, with \"large\" at one end and \"small\" at the other. As we get continuously smaller, there may come an arbitrary point whereby we say \"surely this is no longer a limb\" (and of course, we are not talking about a pure geometrical size transformation here - as a limb reaches extreme edges of a size range various other morphological changes necessarily happen). But this cutoff is arguably arbitrary, and the resulting discontinuity causes problems. It is simpler to treat absence as being one end of a size scale. Summary This is barely touching the subject, and is intended to illustrate that things may be more subtle than naively treating words like \"absent\" as precisely equivalent to cardinality=0. An understanding of the medical, developmental and evolutionary contexts is absolutely required, together with an understanding of the entailments of different logical formulations. 
Even though existing phenotype ontologies may not be conceived of formally, it is implicit that they do not model absence as being equivalent to cardinality=0 / not(has_part), because the structure of these ontologies would look radically different. TODO Link to Jim Balhoff's PhenoDay paper and discussion Here's the link: http://phenoday2014.bio-lark.org/pdf/11.pdf","title":"Absence modelling"},{"location":"reference/modelling/absence/#introduction","text":"Much has been written on the subject of representing absence. Before diving into the logical issues it is worth examining patterns in existing phenotype ontologies to understand what user expectations may typically be for absence.","title":"Introduction"},{"location":"reference/modelling/absence/#background","text":"* Absence_Phenotypes_in_OWL (Phenoscape Wiki) \\ * (outdated) material on the old PATO wiki .","title":"Background"},{"location":"reference/modelling/absence/#details","text":"","title":"Details"},{"location":"reference/modelling/absence/#strict-logical-absence-vs-absence-of-some","text":"It is not uncommon to see patterns such as From a strict logical perspective, this is inverted. \"absent incisors\" surely means \"absence of all incisors\", or put another way \"the animal has no incisors\". Yet it would be possible to have an animal with *absent* lower incisors and *present* upper incisors, yielding what seems a contradiction (because the subClass axiom would say this partial-incisor animal lacked all incisors). If the ontology were in fact truly modeling \"absence of *all* S\" then it would lead to a curious ontology structure, with the typical tree structure of the anatomy ontology representing S inverted into a polyhierarchical fan in the absent-S ontology. From this it can be cautiously inferred that the intent of the phenotype ontology curator and user is in fact to model \"absence of *some* S\" rather than \"absence of *all* S\". This is not necessarily a universal rule, and the intent may vary depending on whether we are talking about a serially repeated structure or one that typically occurs in isolation. The intent may also be to communicate that a *significant number* of S is missing.","title":"Strict logical absence vs absence of some"},{"location":"reference/modelling/absence/#absence-as-a-type-of-morphology","text":"It is also not uncommon to see patterns such as: Again, from a strict logical perspective this is false. If the spleen is absent then what does the \"morphology\" of the parent refer to? However, this inference is clearly a desirable one from the point of view of the phenotype ontology editors and users, as it is common in ontologies for a variety of structures. For example: And: These patterns can be formally defended on developmental biology grounds. \"absence\" here is _not_ equivalent to logical absence. It refers specifically to developmental absence. Furthermore, strict logical absence leads to undesirable inferences. It would be odd to include a nematode worm as having the phenotype \"spleen absent\", because worms have not evolved spleens. But the logical description of not having a spleen as part fits a worm. Similarly, if the strict cardinality interpretation were intended, we would expect to see: i.e. if you're missing your entire hindlegs, you're *necessarily* missing your femurs. But it must be emphasized that this is *not* how phenotype ontologies are classified. This goes for a wide range of structures and other relationship types. 
In MP, \"absent limb buds\" are *not* classified under \"absent limbs\", even though it is impossible for a mammal to have limbs without having had limb buds.","title":"Absence as a type of morphology"},{"location":"reference/modelling/absence/#absence-as-part-of-a-size-morphology-spectrum","text":"The existing treatment of absence can be formally defended morphologically by conceiving of a morphological value space, with \"large\" at one end and \"small\" at the other. As we get continuously smaller, there may come an arbitrary point whereby we say \"surely this is no longer a limb\" (and of course, we are not talking about a pure geometrical size transformation here - as a limb reaches extreme edges of a size range various other morphological changes necessarily happen). But this cutoff is arguably arbitrary, and the resulting discontinuity causes problems. It is simpler to treat absence as being one end of a size scale.","title":"Absence as part of a size-morphology spectrum"},{"location":"reference/modelling/absence/#summary","text":"This is barely touching the subject, and is intended to illustrate that things may be more subtle than naively treating words like \"absent\" as precisely equivalent to cardinality=0. An understanding of the medical, developmental and evolutionary contexts are absolutely required, together with an understanding of the entailments of different logical formulations. Even though existing phenotype ontologies may not be conceived of formally, it is implicit than they do not model absence as being equivalent to cardinality=0 / not(has_part), because the structure of these ontologies would look radically different.","title":"Summary"},{"location":"reference/modelling/absence/#todo","text":"Link to Jim Balhoff's PhenoDay paper and discussion Here's the link: http://phenoday2014.bio-lark.org/pdf/11.pdf","title":"TODO"},{"location":"reference/qc/odk_checks/","text":"ODK: Basic Quality Control","title":"Standard OBO checks"},{"location":"reference/qc/odk_checks/#odk-basic-quality-control","text":"","title":"ODK: Basic Quality Control"},{"location":"tutorials/analysis/","text":"Using OBA and uPheno for data analysis Authors: James McLaughlin Nicolas Matentzoglu Last update: 27.03.2024. Semantic similarity Mappings Data aggregation","title":"Using uPheno in Data Analysis"},{"location":"tutorials/analysis/#using-oba-and-upheno-for-data-analysis","text":"Authors: James McLaughlin Nicolas Matentzoglu Last update: 27.03.2024.","title":"Using OBA and uPheno for data analysis"},{"location":"tutorials/analysis/#semantic-similarity","text":"","title":"Semantic similarity"},{"location":"tutorials/analysis/#mappings","text":"","title":"Mappings"},{"location":"tutorials/analysis/#data-aggregation","text":"","title":"Data aggregation"},{"location":"tutorials/curation/","text":"Using OBA and uPheno in data curation Authors: James McLaughlin Nicolas Matentzoglu Last update: 27.03.2024. Overview Phenotyping is, in essence, the process of recording the observable characteristics, or phenotypic profile, of an organism. There are many use cases for doing this task: clinicians have to record a patient's phenotypic profile to facilitate more accurate diagnosis. Researchers have to record phenotypic profiles of model organisms to characterise them to assess interventions (genetic or drug or otherwise). 
Curators that seek to build a knowledge base which contains associations between phenotypes and other data types need to extract information about phenotypes from often unstructured data sources. All of these are different processes, but the essence is the same: a set of observable characteristics has to be recorded using terms from a controlled vocabulary. There are different schools about how to record phenotypes in a structured manner. Quantified phenotypes can be recorded using either a trait in combination with a measurement datum (\u201chead circumference\u201d, \u201c35 cm\u201d) or a qualified term expressing \u201cphenotypic change\u201d (\u201cincreased head circumference\u201d). Furthermore, we can express phenotype terms as \u201cpre-coordinated\u201d terms, like \u201cincreased head circumference\u201d or a \u201cpost-coordinated expression\u201d, like \u201chead\u201d, \u201ccircumference\u201d, \u201cincreased\u201d). In the following, we will describe the different concepts and categories around phenotype data, and provide an introduction on how to best use them. Pre-requisites Familiarise yourself with the core concepts Examples of phenotype data Category Example datasets Example phenotype Gene to phenotype associations Online Mendelian Inheritance in Man (OMIM) , Human Phenotype Ontology (HPO) , Gene Ontology (GO) Achondroplasia (associated with FGFR3 gene mutations) Gene to disease associations The Cancer Genome Atlas (TCGA) , Online Mendelian Inheritance in Man (OMIM) , GWAS Catalog Breast invasive carcinoma (associated with BRCA1/BRCA2 mutations) Phenotype-phenotype semantic similarity Human Phenotype Ontology (HPO) , Unified Medical Language System (UMLS) , Disease Ontology (DO) Cardiac abnormalities (semantic similarity with congenital heart defects) Quantified trait data (QTL etc) NHGRI-EBI GWAS Catalog , Genotype-Tissue Expression (GTEx) , The Human Protein Atlas Height (quantified trait associated with SNPs in genomic regions) Electronic health records Medical Information Mart for Intensive Care III (MIMIC-III) , UK Biobank , IBM Watson Health Acute kidney injury (recorded diagnosis during ICU stay) Epidemiological datasets Framingham Heart Study , National Health and Nutrition Examination Survey (NHANES) , Global Burden of Disease Study (GBD) Cardiovascular disease (epidemiological study of risk factors and disease incidence) Clinical trial datasets ClinicalTrials.gov , European Union Clinical Trials Register (EUCTR) , International Clinical Trials Registry Platform (ICTRP) Treatment response (clinical trial data on efficacy and safety outcomes) Environmental exposure datasets Environmental Protection Agency Air Quality System (EPA AQS) , Global Historical Climatology Network (GHCN) , National Centers for Environmental Information Climate Data Online (NCEI CDO) Respiratory diseases (association with air pollutant exposure) Population surveys e.g., UK Biobank UK Biobank , National Health Interview Survey (NHIS) , National Health and Nutrition Examination Survey (NHANES) Chronic diseases (population-based study on disease prevalence and risk factors) Behavioral observation datasets National Survey on Drug Use and Health (NSDUH) , Add Health , British Cohort Study (BCS) Substance abuse disorders (survey data on drug consumption and addiction) Important relationships wrt to phenotype data inheres in / characteristic of bearer of Types of phenotype data Precoordinated phenotype Post-coordinated phenotype Attribute-measurement","title":"Using uPheno in 
Curation"},{"location":"tutorials/curation/#using-oba-and-upheno-in-data-curation","text":"Authors: James McLaughlin Nicolas Matentzoglu Last update: 27.03.2024.","title":"Using OBA and uPheno in data curation"},{"location":"tutorials/curation/#overview","text":"Phenotyping is, in essence, the process of recording the observable characteristics, or phenotypic profile, of an organism. There are many use cases for doing this task: clinicians have to record a patient's phenotypic profile to facilitate more accurate diagnosis. Researchers have to record phenotypic profiles of model organisms to characterise them to assess interventions (genetic or drug or otherwise). Curators that seek to build a knowledge base which contains associations between phenotypes and other data types need to extract information about phenotypes from often unstructured data sources. All of these are different processes, but the essence is the same: a set of observable characteristics has to be recorded using terms from a controlled vocabulary. There are different schools about how to record phenotypes in a structured manner. Quantified phenotypes can be recorded using either a trait in combination with a measurement datum (\u201chead circumference\u201d, \u201c35 cm\u201d) or a qualified term expressing \u201cphenotypic change\u201d (\u201cincreased head circumference\u201d). Furthermore, we can express phenotype terms as \u201cpre-coordinated\u201d terms, like \u201cincreased head circumference\u201d or a \u201cpost-coordinated expression\u201d, like \u201chead\u201d, \u201ccircumference\u201d, \u201cincreased\u201d). In the following, we will describe the different concepts and categories around phenotype data, and provide an introduction on how to best use them.","title":"Overview"},{"location":"tutorials/curation/#pre-requisites","text":"Familiarise yourself with the core concepts","title":"Pre-requisites"},{"location":"tutorials/curation/#examples-of-phenotype-data","text":"Category Example datasets Example phenotype Gene to phenotype associations Online Mendelian Inheritance in Man (OMIM) , Human Phenotype Ontology (HPO) , Gene Ontology (GO) Achondroplasia (associated with FGFR3 gene mutations) Gene to disease associations The Cancer Genome Atlas (TCGA) , Online Mendelian Inheritance in Man (OMIM) , GWAS Catalog Breast invasive carcinoma (associated with BRCA1/BRCA2 mutations) Phenotype-phenotype semantic similarity Human Phenotype Ontology (HPO) , Unified Medical Language System (UMLS) , Disease Ontology (DO) Cardiac abnormalities (semantic similarity with congenital heart defects) Quantified trait data (QTL etc) NHGRI-EBI GWAS Catalog , Genotype-Tissue Expression (GTEx) , The Human Protein Atlas Height (quantified trait associated with SNPs in genomic regions) Electronic health records Medical Information Mart for Intensive Care III (MIMIC-III) , UK Biobank , IBM Watson Health Acute kidney injury (recorded diagnosis during ICU stay) Epidemiological datasets Framingham Heart Study , National Health and Nutrition Examination Survey (NHANES) , Global Burden of Disease Study (GBD) Cardiovascular disease (epidemiological study of risk factors and disease incidence) Clinical trial datasets ClinicalTrials.gov , European Union Clinical Trials Register (EUCTR) , International Clinical Trials Registry Platform (ICTRP) Treatment response (clinical trial data on efficacy and safety outcomes) Environmental exposure datasets Environmental Protection Agency Air Quality System (EPA AQS) , Global Historical Climatology 
Network (GHCN) , National Centers for Environmental Information Climate Data Online (NCEI CDO) Respiratory diseases (association with air pollutant exposure) Population surveys e.g., UK Biobank UK Biobank , National Health Interview Survey (NHIS) , National Health and Nutrition Examination Survey (NHANES) Chronic diseases (population-based study on disease prevalence and risk factors) Behavioral observation datasets National Survey on Drug Use and Health (NSDUH) , Add Health , British Cohort Study (BCS) Substance abuse disorders (survey data on drug consumption and addiction)","title":"Examples of phenotype data"},{"location":"tutorials/curation/#important-relationships-wrt-to-phenotype-data","text":"inheres in / characteristic of bearer of","title":"Important relationships wrt to phenotype data"},{"location":"tutorials/curation/#types-of-phenotype-data","text":"Precoordinated phenotype Post-coordinated phenotype Attribute-measurement","title":"Types of phenotype data"},{"location":"tutorials/grouping/","text":"","title":"Using uPheno for Grouping Data"}]} \ No newline at end of file diff --git a/search/worker.js b/search/worker.js new file mode 100644 index 00000000..8628dbce --- /dev/null +++ b/search/worker.js @@ -0,0 +1,133 @@ +var base_path = 'function' === typeof importScripts ? '.' : '/search/'; +var allowSearch = false; +var index; +var documents = {}; +var lang = ['en']; +var data; + +function getScript(script, callback) { + console.log('Loading script: ' + script); + $.getScript(base_path + script).done(function () { + callback(); + }).fail(function (jqxhr, settings, exception) { + console.log('Error: ' + exception); + }); +} + +function getScriptsInOrder(scripts, callback) { + if (scripts.length === 0) { + callback(); + return; + } + getScript(scripts[0], function() { + getScriptsInOrder(scripts.slice(1), callback); + }); +} + +function loadScripts(urls, callback) { + if( 'function' === typeof importScripts ) { + importScripts.apply(null, urls); + callback(); + } else { + getScriptsInOrder(urls, callback); + } +} + +function onJSONLoaded () { + data = JSON.parse(this.responseText); + var scriptsToLoad = ['lunr.js']; + if (data.config && data.config.lang && data.config.lang.length) { + lang = data.config.lang; + } + if (lang.length > 1 || lang[0] !== "en") { + scriptsToLoad.push('lunr.stemmer.support.js'); + if (lang.length > 1) { + scriptsToLoad.push('lunr.multi.js'); + } + if (lang.includes("ja") || lang.includes("jp")) { + scriptsToLoad.push('tinyseg.js'); + } + for (var i=0; i < lang.length; i++) { + if (lang[i] != 'en') { + scriptsToLoad.push(['lunr', lang[i], 'js'].join('.')); + } + } + } + loadScripts(scriptsToLoad, onScriptsLoaded); +} + +function onScriptsLoaded () { + console.log('All search scripts loaded, building Lunr index...'); + if (data.config && data.config.separator && data.config.separator.length) { + lunr.tokenizer.separator = new RegExp(data.config.separator); + } + + if (data.index) { + index = lunr.Index.load(data.index); + data.docs.forEach(function (doc) { + documents[doc.location] = doc; + }); + console.log('Lunr pre-built index loaded, search ready'); + } else { + index = lunr(function () { + if (lang.length === 1 && lang[0] !== "en" && lunr[lang[0]]) { + this.use(lunr[lang[0]]); + } else if (lang.length > 1) { + this.use(lunr.multiLanguage.apply(null, lang)); // spread operator not supported in all browsers: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Spread_operator#Browser_compatibility + } + this.field('title'); + 
this.field('text'); + this.ref('location'); + + for (var i=0; i < data.docs.length; i++) { + var doc = data.docs[i]; + this.add(doc); + documents[doc.location] = doc; + } + }); + console.log('Lunr index built, search ready'); + } + allowSearch = true; + postMessage({config: data.config}); + postMessage({allowSearch: allowSearch}); +} + +function init () { + var oReq = new XMLHttpRequest(); + oReq.addEventListener("load", onJSONLoaded); + var index_path = base_path + '/search_index.json'; + if( 'function' === typeof importScripts ){ + index_path = 'search_index.json'; + } + oReq.open("GET", index_path); + oReq.send(); +} + +function search (query) { + if (!allowSearch) { + console.error('Assets for search still loading'); + return; + } + + var resultDocuments = []; + var results = index.search(query); + for (var i=0; i < results.length; i++){ + var result = results[i]; + doc = documents[result.ref]; + doc.summary = doc.text.substring(0, 200); + resultDocuments.push(doc); + } + return resultDocuments; +} + +if( 'function' === typeof importScripts ) { + onmessage = function (e) { + if (e.data.init) { + init(); + } else if (e.data.query) { + postMessage({ results: search(e.data.query) }); + } else { + console.error("Worker - Unrecognized message: " + e); + } + }; +} diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..38361b7b --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,248 @@ + + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + + None + 2024-03-28 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 00000000..45391f2b Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/tutorials/analysis/index.html b/tutorials/analysis/index.html new file mode 100644 index 00000000..4f5918b6 --- /dev/null +++ b/tutorials/analysis/index.html @@ -0,0 +1,1484 @@ + + + + + + + + + + + + + + + + + + + + + + Using uPheno in Data Analysis - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Using uPheno in Data Analysis

+ +

Using OBA and uPheno for data analysis

+

Authors:

+ +

Last update: 27.03.2024.

+

Semantic similarity

+

Mappings

+

Data aggregation

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/curation/index.html b/tutorials/curation/index.html new file mode 100644 index 00000000..04d0e65e --- /dev/null +++ b/tutorials/curation/index.html @@ -0,0 +1,1595 @@ + + + + + + + + + + + + + + + + + + + + + + Using uPheno in Curation - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Using uPheno in Curation

+ +

Using OBA and uPheno in data curation

+

Authors:

+ +

Last update: 27.03.2024.

+

Overview

+

Phenotyping is, in essence, the process of recording the observable characteristics, or phenotypic profile, of an organism. +There are many use cases for this task: clinicians have to record a patient's phenotypic profile to facilitate more accurate diagnosis. +Researchers have to record the phenotypic profiles of model organisms in order to characterise them and assess interventions (genetic, drug or otherwise). +Curators who seek to build a knowledge base containing associations between phenotypes and other data types need to extract information about phenotypes from often unstructured data sources.

+

All of these are different processes, but the essence is the same: a set of observable characteristics has to be recorded using terms from a controlled vocabulary.

+

There are different schools of thought about how to record phenotypes in a structured manner. +Quantified phenotypes can be recorded using either a trait in combination with a measurement datum (“head circumference”, “35 cm”) or a qualified term expressing “phenotypic change” (“increased head circumference”). +Furthermore, we can express phenotype terms as “pre-coordinated” terms, like “increased head circumference”, or as “post-coordinated” expressions, like (“head”, “circumference”, “increased”). In the following, we will describe the different concepts and categories around phenotype data, and provide an introduction on how best to use them.
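To make these three styles concrete, here is a minimal sketch in plain JavaScript (matching the scripting language already used elsewhere on this site). All identifiers such as PHENO:0000001, ANATOMY:0000001, QUALITY:0000001 and TRAIT:0000001 are hypothetical placeholders introduced for illustration only; they are not real uPheno, PATO or HPO terms.
// Sketch: three ways of recording the same observation, "increased head circumference".
// All term IDs below are invented placeholders, not identifiers from any real ontology.

// 1. Pre-coordinated: a single phenotype term carries the whole meaning.
const preCoordinated = {
  term: { id: "PHENO:0000001", label: "increased head circumference" }
};

// 2. Post-coordinated: the phenotype is composed at annotation time from an
//    anatomical entity, a quality and a qualifier.
const postCoordinated = {
  entity:    { id: "ANATOMY:0000001", label: "head" },
  quality:   { id: "QUALITY:0000001", label: "increased circumference" },
  qualifier: "abnormal"
};

// 3. Attribute-measurement: a neutral trait term plus a quantitative value.
const attributeMeasurement = {
  attribute: { id: "TRAIT:0000001", label: "head circumference" },
  value: 35,
  unit: "cm"
};

console.log(preCoordinated, postCoordinated, attributeMeasurement);
Pre-coordinated terms are the easiest to group and compare across datasets, while post-coordinated expressions and attribute-measurement pairs preserve more of the original detail; which style to curate with depends on the downstream use case.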

+

Pre-requisites

+ +

Examples of phenotype data

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CategoryExample datasetsExample phenotype
Gene to phenotype associationsOnline Mendelian Inheritance in Man (OMIM), Human Phenotype Ontology (HPO), Gene Ontology (GO)Achondroplasia (associated with FGFR3 gene mutations)
Gene to disease associationsThe Cancer Genome Atlas (TCGA), Online Mendelian Inheritance in Man (OMIM), GWAS CatalogBreast invasive carcinoma (associated with BRCA1/BRCA2 mutations)
Phenotype-phenotype semantic similarityHuman Phenotype Ontology (HPO), Unified Medical Language System (UMLS), Disease Ontology (DO)Cardiac abnormalities (semantic similarity with congenital heart defects)
Quantified trait data (QTL etc)NHGRI-EBI GWAS Catalog, Genotype-Tissue Expression (GTEx), The Human Protein AtlasHeight (quantified trait associated with SNPs in genomic regions)
Electronic health recordsMedical Information Mart for Intensive Care III (MIMIC-III), UK Biobank, IBM Watson HealthAcute kidney injury (recorded diagnosis during ICU stay)
Epidemiological datasetsFramingham Heart Study, National Health and Nutrition Examination Survey (NHANES), Global Burden of Disease Study (GBD)Cardiovascular disease (epidemiological study of risk factors and disease incidence)
Clinical trial datasetsClinicalTrials.gov, European Union Clinical Trials Register (EUCTR), International Clinical Trials Registry Platform (ICTRP)Treatment response (clinical trial data on efficacy and safety outcomes)
Environmental exposure datasetsEnvironmental Protection Agency Air Quality System (EPA AQS), Global Historical Climatology Network (GHCN), National Centers for Environmental Information Climate Data Online (NCEI CDO)Respiratory diseases (association with air pollutant exposure)
Population surveys e.g., UK BiobankUK Biobank, National Health Interview Survey (NHIS), National Health and Nutrition Examination Survey (NHANES)Chronic diseases (population-based study on disease prevalence and risk factors)
Behavioral observation datasetsNational Survey on Drug Use and Health (NSDUH), Add Health, British Cohort Study (BCS)Substance abuse disorders (survey data on drug consumption and addiction)
+

Important relationships with respect to phenotype data

+
    +
  • inheres in / characteristic of
  • +
  • bearer of (see the sketch after this list)
  • +
+
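The sketch below (hypothetical JavaScript with made-up labels; it does not reflect any API from this project) illustrates the direction of these two relations: a quality inheres in, i.e. is a characteristic of, its bearer, and the bearer is the bearer of that quality.
// Sketch: "inheres in / characteristic of" and "bearer of" are inverse directions
// of the same link between a quality and the entity that carries it.
const head = { label: "head" };                        // the bearer (an anatomical entity)
const quality = { label: "increased circumference" };  // the quality

quality.inheresIn = head;  // quality -> bearer ("inheres in" / "characteristic of")
head.bearerOf = quality;   // bearer -> quality ("bearer of")

console.log(quality.label + " inheres in " + quality.inheresIn.label);
console.log(head.label + " is bearer of " + head.bearerOf.label);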

Types of phenotype data

+
    +
  • Pre-coordinated phenotype
  • +
  • Post-coordinated phenotype
  • +
  • Attribute-measurement
  • +
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/tutorials/grouping/index.html b/tutorials/grouping/index.html new file mode 100644 index 00000000..6a22dd1e --- /dev/null +++ b/tutorials/grouping/index.html @@ -0,0 +1,1382 @@ + + + + + + + + + + + + + + + + + + + + + + Using uPheno for Grouping Data - Unified Phenotype Ontology + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file