RUBY_ENGINE | = | ::RUBY_ENGINE | alias the RUBY_ENGINE constant inside the Asciidoctor namespace | |
ROOT_PATH | = | ::File.dirname ::File.dirname ::File.expand_path __FILE__ | The absolute root path of the Asciidoctor RubyGem | |
DATA_PATH | = | ::File.join ROOT_PATH, 'data' | The absolute data path of the Asciidoctor RubyGem | |
USER_HOME | = | ::Dir.home | ||
USER_HOME | = | ::ENV['HOME'] || ::Dir.pwd | ||
COERCE_ENCODING | = | !::RUBY_ENGINE_OPAL && ::RUBY_MIN_VERSION_1_9 | Flag to indicate whether encoding can be coerced to UTF-8. All input data must be force encoded to UTF-8 if Encoding.default_external is not UTF-8. Addresses failures performing string operations that are reported as "invalid byte sequence in US-ASCII". Ruby 1.8 doesn't seem to experience this problem (perhaps because it isn't validating the encodings). | |
FORCE_ENCODING | = | COERCE_ENCODING && ::Encoding.default_external != ::Encoding::UTF_8 | Flag to indicate whether encoding of external strings needs to be forced to UTF-8 | |
BOM_BYTES_UTF_8 | = | [0xef, 0xbb, 0xbf] | Byte arrays for UTF-* Byte Order Marks | |
BOM_BYTES_UTF_16LE | = | [0xff, 0xfe] | ||
BOM_BYTES_UTF_16BE | = | [0xfe, 0xff] | ||
FORCE_UNICODE_LINE_LENGTH | = | !::RUBY_MIN_VERSION_1_9 | Flag to indicate that line length should be calculated using a unicode mode hint | |
LF | = | EOL = "\n" | The endline character used for output; stored in constant table as an optimization | |
NULL | = | "\0" | The null character to use for splitting attribute values | |
TAB | = | "\t" | String for matching tab character | |
DEFAULT_DOCTYPE | = | 'article' | The default document type. Can influence markup generated by the converters. | |
DEFAULT_BACKEND | = | 'html5' | The backend determines the format of the converted output; defaults to html5. | |
DEFAULT_STYLESHEET_KEYS | = | ['', 'DEFAULT'].to_set | ||
DEFAULT_STYLESHEET_NAME | = | 'asciidoctor.css' | ||
BACKEND_ALIASES | = | { 'html' => 'html5', 'docbook' => 'docbook5' } | Pointers to the preferred version for a given backend. | |
DEFAULT_PAGE_WIDTHS | = | { 'docbook' => 425 } | Default page widths for calculating absolute widths | |
DEFAULT_EXTENSIONS | = | { 'html' => '.html', 'docbook' => '.xml', 'pdf' => '.pdf', 'epub' => '.epub', 'manpage' => '.man', 'asciidoc' => '.adoc' } | Default extensions for the respective base backends | |
ASCIIDOC_EXTENSIONS | = | { '.asciidoc' => true, '.adoc' => true, '.ad' => true, '.asc' => true, '.txt' => true } # TODO .txt should be deprecated | Set of file extensions recognized as AsciiDoc documents (stored as a truth hash) | |
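For illustration only, a minimal sketch (not from the source) of the truth-hash lookup, assuming the keys listed above ('sample.adoc' and 'notes.md' are hypothetical filenames):

  Asciidoctor::ASCIIDOC_EXTENSIONS[::File.extname 'sample.adoc']  # => true
  Asciidoctor::ASCIIDOC_EXTENSIONS[::File.extname 'notes.md']     # => nil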
SETEXT_SECTION_LEVELS | = | { '=' => 0, '-' => 1, '~' => 2, '^' => 3, '+' => 4 } | ||
ADMONITION_STYLES | = | ['NOTE', 'TIP', 'IMPORTANT', 'WARNING', 'CAUTION'].to_set | ||
ADMONITION_STYLE_HEADS | = | ['N', 'T', 'I', 'W', 'C'].to_set | ||
CALLOUT_LIST_HEADS | = | ['<', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0'].to_set | ||
PARAGRAPH_STYLES | = | ['comment', 'example', 'literal', 'listing', 'normal', 'pass', 'quote', 'sidebar', 'source', 'verse', 'abstract', 'partintro'].to_set | ||
VERBATIM_STYLES | = | ['literal', 'listing', 'source', 'verse'].to_set | ||
DELIMITED_BLOCKS | = | { '--' => [:open, ['comment', 'example', 'literal', 'listing', 'pass', 'quote', 'sidebar', 'source', 'verse', 'admonition', 'abstract', 'partintro'].to_set], '----' => [:listing, ['literal', 'source'].to_set], '....' => [:literal, ['listing', 'source'].to_set], '====' => [:example, ['admonition'].to_set], '****' => [:sidebar, ::Set.new], '____' => [:quote, ['verse'].to_set], '""' => [:quote, ['verse'].to_set], '++++' => [:pass, ['stem', 'latexmath', 'asciimath'].to_set], '|===' => [:table, ::Set.new], ',===' => [:table, ::Set.new], ':===' => [:table, ::Set.new], '!===' => [:table, ::Set.new], '////' => [:comment, ::Set.new], '```' => [:fenced_code, ::Set.new] } | ||
DELIMITED_BLOCK_HEADS | = | DELIMITED_BLOCKS.keys.map {|key| key.slice 0, 2 }.to_set | ||
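As an illustrative sketch (not from the source) of how these two lookup tables relate, assuming the values documented above:

  # resolve the block context and the styles a listing block may masquerade as
  context, masq = Asciidoctor::DELIMITED_BLOCKS['----']           # => :listing, #<Set: {"literal", "source"}>
  # the sniff set holds only the first two characters of each delimiter
  Asciidoctor::DELIMITED_BLOCK_HEADS.include? '----'.slice(0, 2)  # => true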
LAYOUT_BREAK_CHARS | = | { '\'' => :thematic_break, '<' => :page_break } | ||
MARKDOWN_THEMATIC_BREAK_CHARS | = | { '-' => :thematic_break, '*' => :thematic_break, '_' => :thematic_break } | ||
HYBRID_LAYOUT_BREAK_CHARS | = | LAYOUT_BREAK_CHARS.merge MARKDOWN_THEMATIC_BREAK_CHARS | ||
LIST_CONTEXTS | = | [:ulist, :olist, :dlist, :colist] | ||
NESTABLE_LIST_CONTEXTS | = | [:ulist, :olist, :dlist] | ||
ORDERED_LIST_STYLES | = | [:arabic, :loweralpha, :lowerroman, :upperalpha, :upperroman] | TODO validate use of explicit style name above ordered list (this list is for selecting an implicit style) | |
ORDERED_LIST_KEYWORDS | = | { 'loweralpha' => 'a', 'lowerroman' => 'i', 'upperalpha' => 'A', 'upperroman' => 'I' } # commented out: 'arabic' => '1', 'decimal' => '1', 'lowergreek' => 'a' | ||
ATTR_REF_HEAD | = | '{' | ||
LIST_CONTINUATION | = | '+' | ||
HARD_LINE_BREAK | = | ' +' | NOTE AsciiDoc Python allows + to be preceded by TAB; Asciidoctor does not | |
LINE_CONTINUATION | = | ' \\' | ||
LINE_CONTINUATION_LEGACY | = | ' +' | ||
BLOCK_MATH_DELIMITERS | = | { :asciimath => ['\$', '\$'], :latexmath => ['\[', '\]'], } | ||
INLINE_MATH_DELIMITERS | = | { :asciimath => ['\$', '\$'], :latexmath => ['\(', '\)'], } | ||
FLEXIBLE_ATTRIBUTES | = | ['sectnums'] | attributes which can be changed within the content of the document (but not the header) because they have semantic meaning; e.g., sectnums | |
CIRCUMFIX_COMMENTS | = | { ['/*', '*/'] => ['.css'], ['(*', '*)'] => ['.ml', '.mli', '.nb'], ['<!--', '-->'] => ['.html', '.xhtml', '.xml', '.xsl'], ['<%--', '--%>'] => ['.asp', '.jsp'] } | map of comment affixes to the file extensions of languages that only support circumfix comments | |
CC_ALPHA | = | CG_ALPHA = '\p{Alpha}' | ||
CC_ALNUM | = | CG_ALNUM = '\p{Alnum}' | ||
CC_ALL | = | '.' | ||
CG_BLANK | = | '\p{Blank}' | ||
CC_EOL | = | '$' | ||
CC_WORD | = | CG_WORD = '\p{Word}' | ||
CC_ALPHA | = | '[:alpha:]' | ||
CG_ALPHA | = | '[[:alpha:]]' | ||
CC_ALL | = | '.' | ||
CC_ALNUM | = | '[:alnum:]' | ||
CG_ALNUM | = | '[[:alnum:]]' | ||
CC_EOL | = | '$' | ||
CG_BLANK | = | '[[:blank:]]' | ||
CC_WORD | = | '[:word:]' | ||
CG_WORD | = | '[[:word:]]' | ||
CG_BLANK | = | '[ \t]' | NOTE Ruby 1.8 cannot match word characters beyond the ASCII range; if you need this feature, upgrade! | |
CC_WORD | = | '[:alnum:]_' | ||
CG_WORD | = | '[[:alnum:]_]' | ||
AuthorInfoLineRx | = | /^(#{CG_WORD}[#{CC_WORD}\-'.]*)(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +<([^>]+)>)?$/ |
Matches the author info line immediately following the document title.
Examples:
  Doc Writer <doc@example.com>
  Mary_Sue Brontë |
|
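A minimal, illustrative sketch (not part of the source) of applying this expression, assuming the pattern shown above:

  m = Asciidoctor::AuthorInfoLineRx.match 'Doc Writer <doc@example.com>'
  m.captures  # => ['Doc', 'Writer', nil, 'doc@example.com']  (name parts and optional email)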
RevisionInfoLineRx | = | /^(?:\D*(.*?),)? *(?!:)(.*?)(?: *(?!^),?: *(.*))?$/ |
Matches the revision info line, which appears immediately following the
author info line beneath the document title.
Examples:
  v1.0
  2013-01-01
  v1.0, 2013-01-01: Ring in the new year release
  1.0, Jan 01, 2013 |
|
ManpageTitleVolnumRx | = | /^(.+?) *\( *(.+?) *\)$/ |
Matches the title and volnum in the manpage doctype.
Examples:
  = asciidoctor(1)
  = asciidoctor ( 1 ) |
|
ManpageNamePurposeRx | = | /^(.+?) +- +(.+)$/ |
Matches the name and purpose in the manpage doctype.
Examples asciidoctor - converts AsciiDoc source files to HTML, DocBook and other formats |
|
ConditionalDirectiveRx | = | /^(\\)?(ifdef|ifndef|ifeval|endif)::(\S*?(?:([,+])\S*?)?)\[(.+)?\]$/ |
Matches a conditional preprocessor directive (e.g., ifdef, ifndef, ifeval
and endif).
Examples:
  ifdef::basebackend-html[]
  ifndef::theme[]
  ifeval::["{asciidoctor-version}" >= "0.1.0"]
  ifdef::asciidoctor[Asciidoctor!]
  endif::theme[]
  endif::basebackend-html[]
  endif::[] |
|
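For orientation, a hedged sketch (not from the source) of the capture groups, assuming the expression shown above:

  # groups: 1 = escape, 2 = keyword, 3 = target, 4 = delimiter, 5 = bracketed text
  m = Asciidoctor::ConditionalDirectiveRx.match 'ifdef::asciidoctor[Asciidoctor!]'
  m[2], m[3], m[5]  # => 'ifdef', 'asciidoctor', 'Asciidoctor!'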
EvalExpressionRx | = | /^(.+?) *([=!><]=|[><]) *(.+)$/ |
Matches a restricted (read as safe) eval expression.
Examples "{asciidoctor-version}" >= "0.1.0" |
|
IncludeDirectiveRx | = | /^(\\)?include::([^\[][^\[]*)\[(.*)\]$/ |
Matches an include preprocessor directive.
Examples include::chapter1.ad[] include::example.txt[lines=1;2;5..10] |
|
TagDirectiveRx | = | /\b(?:tag|(end))::(\S+)\[\]$/ |
Matches a trailing tag directive in an include file.
Examples:
  // tag::try-catch[]
  try {
    someMethod();
  } catch (Exception e) {
    log(e);
  }
  // end::try-catch[] |
|
AttributeEntryRx | = | /^:(!?\w.*?):(?:[ \t]+(.*))?$/ |
Matches a document attribute entry.
Examples:
  :foo: bar
  :First Name: Dan
  :sectnums!:
  :!toc:
  :long-entry: Attribute
               value lines ending in ' \'
               are joined together as a single value,
               collapsing the line breaks and indentation to
               a single space. |
|
InvalidAttributeNameCharsRx | = | /[^\w\-]/ | Matches invalid characters in an attribute name. | |
AttributeEntryPassMacroRx | = | /^pass:([a-z]+(?:,[a-z]+)*)?\[([\S\s]*)\]$/ | In JavaScript, ^ and $ match the boundaries of the string when the m flag is not set | |
AttributeEntryPassMacroRx | = | /\Apass:([a-z]+(?:,[a-z]+)*)?\[(.*)\]\Z/m | ||
AttributeReferenceRx | = | /(\\)?\{(\w+[-\w]*|(set|counter2?):.+?)(\\)?\}/ |
Matches an inline attribute reference.
Examples:
  {foobar} or {app_name} or {product-version}
  {counter:sequence-name:1}
  {set:foo:bar}
  {set:name!} |
|
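A brief illustrative sketch (not part of the source), assuming the expression above:

  m = Asciidoctor::AttributeReferenceRx.match 'the current release is {product-version}'
  m[2]  # => 'product-version'  (group 1 would capture an escaping backslash, if present)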
BlockAnchorRx | = | /^\[\[(?:|([#{CC_ALPHA}_:][#{CC_WORD}:.-]*)(?:, *(.+))?)\]\]$/ |
Matches an anchor (i.e., id + optional reference text) on a line above a
block.
Examples [[idname]] [[idname,Reference Text]] |
|
BlockAttributeListRx | = | /^\[(|[#{CC_WORD}.#%{,"'].*)\]$/ |
Matches an attribute list above a block element.
Examples:
  # strictly positional
  [quote, Adam Smith, Wealth of Nations]
  # name/value pairs
  [NOTE, caption="Good to know"]
  # as attribute reference
  [{lead}] |
|
BlockAttributeLineRx | = | /^\[(?:|[#{CC_WORD}.#%{,"'].*|\[(?:|[#{CC_ALPHA}_:][#{CC_WORD}:.-]*(?:, *.+)?)\])\]$/ |
A combined pattern that matches either a block anchor or a block attribute
list.
TODO this one gets hit a lot, should be optimized as much as possible |
|
BlockTitleRx | = | /^\.([^ \t.].*)$/ |
Matches a title above a block.
Examples .Title goes here |
|
AdmonitionParagraphRx | = | /^(#{ADMONITION_STYLES.to_a * '|'}):[ \t]+/ |
Matches an admonition label at the start of a paragraph.
Examples NOTE: Just a little note. TIP: Don't forget! |
|
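A one-line illustrative check (not from the source), assuming ADMONITION_STYLES and the expression above:

  m = Asciidoctor::AdmonitionParagraphRx.match 'NOTE: Just a little note.'
  m[1]  # => 'NOTE'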
LiteralParagraphRx | = | /^([ \t]+.*)$/ |
Matches a literal paragraph, which is a line of text preceded by at least
one space.
Examples <SPACE>Foo <TAB>Foo |
|
AtxSectionTitleRx | = | /^(=={0,5})[ \t]+(.+?)(?:[ \t]+\1)?$/ |
Matches an Atx (single-line) section title.
Examples:
  == Foo
  // ^ a level 1 (h2) section title
  == Foo ==
  // ^ also a level 1 (h2) section title |
|
ExtAtxSectionTitleRx | = | /^(=={0,5}|#\#{0,5})[ \t]+(.+?)(?:[ \t]+\1)?$/ | Matches an extended Atx section title that includes support for the Markdown variant. | |
SetextSectionTitleRx | = | /^((?=.*#{CG_WORD}+.*)[^.].*?)$/ | Matches the title only (first line) of an Setext (two-line) section title. The title cannot begin with a dot and must have at least one alphanumeric character. | |
InlineSectionAnchorRx | = | / (\\)?\[\[([#{CC_ALPHA}_:][#{CC_WORD}:.-]*)(?:, *(.+))?\]\]$/ |
Matches an anchor (i.e., id + optional reference text) inside a section
title.
Examples Section Title [[idname]] Section Title [[idname,Reference Text]] |
|
InvalidSectionIdCharsRx | = | /&(?:[a-z][a-z]+\d{0,2}|#\d\d\d{0,4}|#x[\da-f][\da-f][\da-f]{0,3});|[^#{CC_WORD}]+?/ |
Matches invalid characters in a section id.
NOTE uppercase chars are not included since the expression is used on a lowercased string |
|
DiscreteHeadingStyleRx | = | /^(?:discrete|float)\b/ |
Matches the block style used to designate a discrete (aka free-floating)
heading.
Examples:
  [discrete]
  = Discrete Heading |
|
AnyListRx | = | /^(?:[ \t]*(?:-|\*\*{0,4}|\.\.{0,4}|\u2022\u2022{0,4}|\d+\.|[a-zA-Z]\.|[IVXivx]+\))[ \t]|[ \t]*.*?(?::{2,4}|;;)(?:$|[ \t])|<?\d+>[ \t])/ |
Detects the start of any list item.
NOTE we only have to check as far as the blank character because we know it means non-whitespace follows. |
|
UnorderedListRx | = | /^[ \t]*(-|\*\*{0,4}|\u2022\u2022{0,4})[ \t]+(.*)$/ |
Matches an unordered list item (one level for hyphens, up to 5 levels for
asterisks).
Examples:
  * Foo
  - Foo
NOTE we know trailing (.*) will match at least one character because we strip trailing spaces |
|
OrderedListRx | = | /^[ \t]*(\.\.{0,4}|\d+\.|[a-zA-Z]\.|[IVXivx]+\))[ \t]+(.*)$/ |
Matches an ordered list item (explicit numbering or up to 5 consecutive
dots).
Examples:
  . Foo
  .. Foo
  1. Foo (arabic, default)
  a. Foo (loweralpha)
  A. Foo (upperalpha)
  i. Foo (lowerroman)
  I. Foo (upperroman)
NOTE leading space match is not always necessary, but is used for list reader
NOTE we know trailing (.*) will match at least one character because we strip trailing spaces |
|
OrderedListMarkerRxMap | = | { :arabic => /\d+\./, :loweralpha => /[a-z]\./, :lowerroman => /[ivx]+\)/, :upperalpha => /[A-Z]\./, :upperroman => /[IVX]+\)/ } | Matches the ordinals for each type of ordered list. | |
DescriptionListRx | = | %r(^(?!//)[ \t]*(.*?)(:{2,4}|;;)(?:[ \t]+(.*))?$) |
Matches a description list entry.
Examples:
  foo::
  foo:::
  foo::::
  foo;;
  # the term can be followed by a description on the same line...
  foo:: That which precedes 'bar' (see also, <<bar>>)
  # ...or on a separate line (optionally indented)
  foo::
    That which precedes 'bar' (see also, <<bar>>)
  # the term or description may be an attribute reference
  {foo_term}:: {foo_def}
NOTE negative match for comment line is intentional since that isn't handled when looking for next list item
TODO check for line comment when scanning lines instead of in regex |
|
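For illustration only (not from the source), a sketch of the captures, assuming the expression above:

  m = Asciidoctor::DescriptionListRx.match "foo:: That which precedes 'bar'"
  m[1], m[2], m[3]  # => 'foo', '::', "That which precedes 'bar'"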
DescriptionListSiblingRx | = | { '::' => %r(^(?!//)[ \t]*((?:.*[^:])?)(::)(?:[ \t]+(.*))?$), ':::' => %r(^(?!//)[ \t]*((?:.*[^:])?)(:::)(?:[ \t]+(.*))?$), '::::' => %r(^(?!//)[ \t]*((?:.*[^:])?)(::::)(?:[ \t]+(.*))?$), ';;' => %r(^(?!//)[ \t]*(.*)(;;)(?:[ \t]+(.*))?$) } | Matches a sibling description list item (which does not include the type in the key). NOTE (?:.*?[^:])? is a non-capturing group which grabs the longest sequence of characters that doesn't end with a colon. | |
CalloutListRx | = | /^<?(\d+)>[ \t]+(.*)$/ |
Matches a callout list item.
Examples <1> Foo NOTE we know trailing (.*) will match at least one character because we strip trailing spaces |
|
CalloutListSniffRx | = | /^<?\d+>/ | Detects a potential callout list item. | |
CalloutExtractRx | = | %r((?:(?://|#|--|;;) ?)?(\\)?<!?(|--)(\d+)\2>(?=(?: ?\\?<!?\2\d+\2>)*$)) |
Matches a callout reference inside literal text.
Examples <1> (optionally prefixed by //, #, -- or ;; line comment chars) <1> <2> (multiple callouts on one line) <!--1--> (for XML-based languages) NOTE extract regexps are applied line-by-line, so we can use $ as end-of-line char |
|
CalloutExtractRxt | = | '(\\\\)?<()(\\d+)>(?=(?: ?\\\\?<\\d+>)*$)' | ||
CalloutScanRx | = | /\\?<!?(|--)(\d+)\1>(?=(?: ?\\?<!?\1\d+\1>)*#{CC_EOL})/ | NOTE special characters have not been replaced when scanning | |
CalloutSourceRx | = | %r((?:(?://|#|--|;;) ?)?(\\)?<!?(|--)(\d+)\2>(?=(?: ?\\?<!?\2\d+\2>)*#{CC_EOL})) | NOTE special characters have already been replaced when converting to an SGML format | |
CalloutSourceRxt | = | "(\\\\)?<()(\\d+)>(?=(?: ?\\\\?<\\d+>)*#{CC_EOL})" | ||
ListRxMap | = | { :ulist => UnorderedListRx, :olist => OrderedListRx, :dlist => DescriptionListRx, :colist => CalloutListRx } | A Hash of regexps for lists used for dynamic access. | |
ColumnSpecRx | = | /^(?:(\d+)\*)?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?(\d+%?)?([a-z])?$/ |
Parses the column spec (i.e., colspec) for a table.
Examples 1*h,2*,^3e |
|
CellSpecStartRx | = | /^[ \t]*(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ |
Parses the start and end of a cell spec (i.e., cellspec) for a table.
Examples 2.3+<.>m FIXME use step-wise scan (or treetop) rather than this mega-regexp |
|
CellSpecEndRx | = | /[ \t]+(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ | ||
CustomBlockMacroRx | = | /^(#{CG_WORD}+)::(|\S|\S.*?\S)\[(.*)\]$/ |
Matches the custom block macro pattern.
Examples gist::123456[] |
|
BlockMediaMacroRx | = | /^(image|video|audio)::(\S|\S.*?\S)\[(.*)\]$/ |
Matches an image, video or audio block macro.
Examples image::filename.png[Caption] video::http://youtube.com/12345[Cats vs Dogs] |
|
BlockTocMacroRx | = | /^toc::\[(.*)\]$/ |
Matches the TOC block macro.
Examples toc::[] toc::[levels=2] |
|
InlineAnchorRx | = | /(\\)?(?:\[\[([#{CC_ALPHA}_:][#{CC_WORD}:.-]*)(?:, *(.+?))?\]\]|anchor:([#{CC_ALPHA}_:][#{CC_WORD}:.-]*)\[(?:\]|(.*?[^\\])\]))/ |
Matches an anchor (i.e., id + optional reference text) in the flow of text.
Examples [[idname]] [[idname,Reference Text]] anchor:idname[] anchor:idname[Reference Text] |
|
InlineAnchorScanRx | = | /(?:^|[^\\\[])\[\[([#{CC_ALPHA}_:][#{CC_WORD}:.-]*)(?:, *(.+?))?\]\]|(?:^|[^\\])anchor:([#{CC_ALPHA}_:][#{CC_WORD}:.-]*)\[(?:\]|(.*?[^\\])\])/ | Scans for a non-escaped anchor (i.e., id + optional reference text) in the flow of text. | |
InlineBiblioAnchorRx | = | /^\[\[\[([#{CC_ALPHA}_:][#{CC_WORD}:.-]*)(?:, *(.+?))?\]\]\]/ |
Matches a bibliography anchor at the start of the list item text (in a
bibliography list).
Examples [[[Fowler_1997]]] Fowler M. ... |
|
EmailInlineRx | = | %r(([\\>:/])?#{CG_WORD}[#{CC_WORD}.%+-]*@#{CG_ALNUM}[#{CC_ALNUM}.-]*\.#{CG_ALPHA}{2,4}\b) |
Matches an inline e-mail address.
doc.writer@example.com |
|
InlineFootnoteMacroRx | = | /\\?(footnote(?:ref)?):\[(#{CC_ALL}*?[^\\])\]/m |
Matches an inline footnote macro, which is allowed to span multiple lines.
Examples footnote:[text] footnoteref:[id,text] footnoteref:[id] |
|
InlineImageMacroRx | = | /\\?i(?:mage|con):([^:\s\[](?:[^\n\[]*[^\s\[])?)\[(|#{CC_ALL}*?[^\\])\]/m |
Matches an image or icon inline macro.
Examples:
  image:filename.png[Alt Text]
  image:http://example.com/images/filename.png[Alt Text]
  image:filename.png[More [Alt\] Text] (alt text becomes "More [Alt] Text")
  icon:github[large]
NOTE be as non-greedy as possible by not allowing endline or left square bracket in target |
|
InlineIndextermMacroRx | = | /\\?(?:(indexterm2?):\[(#{CC_ALL}*?[^\\])\]|\(\((#{CC_ALL}+?)\)\)(?!\)))/m |
Matches an indexterm inline macro, which may span multiple lines.
Examples indexterm:[Tigers,Big cats] (((Tigers,Big cats))) indexterm2:[Tigers] ((Tigers)) |
|
InlineKbdBtnMacroRx | = | /(\\)?(kbd|btn):\[(#{CC_ALL}*?[^\\])\]/m |
Matches either the kbd or btn inline macro.
Examples kbd:[F3] kbd:[Ctrl+Shift+T] kbd:[Ctrl+\]] kbd:[Ctrl,T] btn:[Save] |
|
LinkInlineRx | = | %r((^|link:|#{CG_BLANK}|<|[>\(\)\[\];])(\\?(?:https?|file|ftp|irc)://[^\s\[\]<]*[^\s.,\[\]<])(?:\[(|#{CC_ALL}*?[^\\])\])?)m |
Matches an implicit link and some of the link inline macro.
Examples https://github.com https://github.com[GitHub] <https://github.com> link:https://github.com[] FIXME revisit! the main issue is we need different rules for implicit vs explicit |
|
InlineLinkMacroRx | = | /\\?(?:link|(mailto)):(|[^:\s\[][^\s\[]*)\[(|#{CC_ALL}*?[^\\])\]/m |
Match a link or e-mail inline macro.
Examples link:path[label] mailto:doc.writer@example.com[] NOTE be as non-greedy as possible by not allowing space or left square bracket in target |
|
MacroNameRx | = | /^#{CG_WORD}+$/ | Matches the name of a macro. | |
InlineStemMacroRx | = | /\\?(stem|(?:latex|ascii)math):([a-z]+(?:,[a-z]+)*)?\[(#{CC_ALL}*?[^\\])\]/m |
Matches a stem (and alternatives, asciimath and latexmath) inline macro,
which may span multiple lines.
Examples stem:[x != 0] asciimath:[x != 0] latexmath:[\sqrt{4} = 2] |
|
InlineMenuMacroRx | = | /\\?menu:(#{CG_WORD}|[#{CC_WORD}&][^\n\[]*[^\s\[])\[ *(#{CC_ALL}*?[^\\])?\]/m |
Matches a menu inline macro.
Examples menu:File[Save As...] menu:View[Page Style > No Style] menu:View[Page Style, No Style] |
|
MenuInlineRx | = | /\\?"([#{CC_WORD}&][^"]*?[ \n]+>[ \n]+[^"]*)"/ |
Matches an implicit menu inline macro.
Examples "File > New..." |
|
PassInlineRx | = | { false => ['+', '`', /(^|[^#{CC_WORD};:])(?:\[([^\]]+)\])?(\\?(\+|`)(\S|\S#{CC_ALL}*?\S)\4)(?!#{CG_WORD})/m], true => ['`', nil, /(^|[^`#{CC_WORD}])(?:\[([^\]]+)\])?(\\?(`)([^`\s]|[^`\s]#{CC_ALL}*?\S)\4)(?![`#{CC_WORD}])/m] } |
Matches an inline passthrough value, which may span multiple lines.
Examples +text+ `text` (compat) NOTE we always capture the attributes so we know when to use compatible (i.e., legacy) behavior |
|
InlinePassMacroRx | = | /(?:(?:(\\?)\[([^\]]+)\])?(\\{0,2})(\+\+\+?|\$\$)(#{CC_ALL}*?)\4|(\\?)pass:([a-z]+(?:,[a-z]+)*)?\[(|#{CC_ALL}*?[^\\])\])/m |
Matches several variants of the passthrough inline macro, which may span
multiple lines.
Examples +++text+++ $$text$$ pass:quotes[text] NOTE we have to support an empty pass:[] for compatibility with AsciiDoc Python |
|
InlineXrefMacroRx | = | %r(\\?(?:<<([#{CC_WORD}#/.:{]#{CC_ALL}*?)>>|xref:([#{CC_WORD}#/.:{]#{CC_ALL}*?)\[(#{CC_ALL}*?[^\\])?\]))m |
Matches an xref (i.e., cross-reference) inline macro, which may span
multiple lines.
Examples <<id,reftext>> xref:id[reftext] NOTE special characters have already been escaped, hence the entity references NOTE { is included in start characters to support target that begins with attribute reference in title content |
|
HardLineBreakRx | = | /^(.*) \+$/m | NOTE In Ruby, ^ and $ always match start and end of line, respectively; JavaScript only does so in multiline mode | |
HardLineBreakRx | = | /^(.*) \+$/ | ||
MarkdownThematicBreakRx | = | /^ {0,3}([-*_])( *)\1\2\1$/ |
Matches a Markdown horizontal rule.
Examples --- or - - - *** or * * * ___ or _ _ _ |
|
ExtLayoutBreakRx | = | /^(?:'{3,}|<{3,}|([-*_])( *)\1\2\1)$/ |
Matches an AsciiDoc or Markdown horizontal rule or AsciiDoc page break.
Examples ''' (horizontal rule) <<< (page break) --- or - - - (horizontal rule, Markdown) *** or * * * (horizontal rule, Markdown) ___ or _ _ _ (horizontal rule, Markdown) |
|
BlankLineRx | = | /\n{2,}/ |
Matches consecutive blank lines.
Examples (note the blank line between the words):
  one

  two |
|
DataDelimiterRx | = | /[,;]/ |
Matches a comma or semi-colon delimiter.
Examples one,two three;four |
|
TrailingDigitsRx | = | /\d+$/ |
Matches one or more consecutive digits at the end of a line.
Examples docbook45 html5 |
|
EscapedSpaceRx | = | /\\([ \t\n])/ |
Matches whitespace (space, tab, newline) escaped by a backslash.
Examples three\ blind\ mice |
|
ReplaceableTextRx | = | /[&']|--|\.\.\.|\([CRT]M?\)/ | Detects if text is a possible candidate for the replacements substitution. | |
SpaceDelimiterRx | = | /([^\\])[ \t\n]+/ |
Matches a whitespace delimiter, a sequence of spaces, tabs, and/or
newlines. Matches the parsing rules of %w strings in Ruby.
Examples one two three four five six TODO change to /(?<!\\)[ \t\n]+/ after dropping support for Ruby 1.8.7 |
|
SubModifierSniffRx | = | /[+-]/ | Matches a + or - modifier in a subs list | |
UnicodeCharScanRx | = | /./u if FORCE_UNICODE_LINE_LENGTH | ||
UriSniffRx | = | %r(^#{CG_ALPHA}[#{CC_ALNUM}.+-]+:/{0,2}) |
Detects strings that resemble URIs.
Examples http://domain https://domain file:///path data:info not c:/sample.adoc or c:\sample.adoc |
|
UriTerminatorRx | = | /[);:]$/ |
Detects the end of an implicit URI in the text
Examples (http://google.com) >http://google.com< (See http://google.com): |
|
XmlSanitizeRx | = | /<[^>]+>/ | Detects XML tags | |
INTRINSIC_ATTRIBUTES | = | { 'startsb' => '[', 'endsb' => ']', 'vbar' => '|', 'caret' => '^', 'asterisk' => '*', 'tilde' => '~', 'plus' => '+', 'backslash' => '\\', 'backtick' => '`', 'blank' => '', 'empty' => '', 'sp' => ' ', 'two-colons' => '::', 'two-semicolons' => ';;', 'nbsp' => ' ', 'deg' => '°', 'zwsp' => '​', 'quot' => '"', 'apos' => '\'', 'lsquo' => '‘', 'rsquo' => '’', 'ldquo' => '“', 'rdquo' => '”', 'wj' => '⁠', 'brvbar' => '¦', 'cpp' => 'C++', 'amp' => '&', 'lt' => '<', 'gt' => '>' } | ||
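An illustrative lookup (not from the source), assuming the table above:

  Asciidoctor::INTRINSIC_ATTRIBUTES['vbar']  # => '|'
  Asciidoctor::INTRINSIC_ATTRIBUTES['sp']    # => ' '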
QUOTE_SUBS | = | { false => quote_subs, true => compat_quote_subs } | ||
REPLACEMENTS | = | [
  # (C)
  [/\\?\(C\)/, '©', :none],
  # (R)
  [/\\?\(R\)/, '®', :none],
  # (TM)
  [/\\?\(TM\)/, '™', :none],
  # foo -- bar
  # FIXME this drops the endline if it appears at end of line
  [/(^|\n| |\\)--( |\n|$)/, ' — ', :none],
  # foo--bar
  [/(#{CG_WORD})\\?--(?=#{CG_WORD})/, '—​', :leading],
  # ellipsis
  [/\\?\.\.\./, '…​', :leading],
  # right single quote
  [/\\?`'/, '’', :none],
  # apostrophe (inside a word)
  [/(#{CG_ALNUM})\\?'(?=#{CG_ALPHA})/, '’', :leading],
  # right arrow ->
  [/\\?->/, '→', :none],
  # right double arrow =>
  [/\\?=>/, '⇒', :none],
  # left arrow <-
  [/\\?<-/, '←', :none],
  # left double arrow <=
  [/\\?<=/, '⇐', :none],
  # restore entities
  [/\\?(&)amp;((?:[a-zA-Z][a-zA-Z]+\d{0,2}|#\d\d\d{0,4}|#x[\da-fA-F][\da-fA-F][\da-fA-F]{0,3});)/, '', :bounding]
] | NOTE in Ruby 1.8.7, [^\\] does not match start of line, so we need to match it explicitly. The order of entries is significant. | |
VERSION | = | '1.5.6.1' |
convert | -> | render |
Alias render to convert to maintain backwards compatibility | ||
convert_file | -> | render_file |
Alias render_file to convert_file to maintain backwards compatibility |
Internal: Automatically load the Asciidoctor::Extensions module.
Requires the Asciidoctor::Extensions module if the name is :Extensions. Otherwise, delegates to the super method.
This method provides the same functionality as using autoload on Asciidoctor::Extensions, except that the constant isn't recognized as defined prior to it being loaded.
Returns the resolved constant, if resolved, otherwise nothing.
Public: Parse the AsciiDoc source input into an Asciidoctor::Document and convert it to the specified backend format.
Accepts input as an IO, String or String Array object. If the input is a File, information about the file is stored in attributes on the Document.
If the :in_place option is true, and the input is a File, the output is written to a file adjacent to the input file, having an extension that corresponds to the backend format. Otherwise, if the :to_file option is specified, the file is written to that file. If :to_file is not an absolute path, it is resolved relative to :to_dir, if given, otherwise the Document#base_dir. If the target directory does not exist, it will not be created unless the :mkdirs option is set to true. If the file cannot be written because the target directory does not exist, or because it falls outside of the Document#base_dir in safe mode, an IOError is raised.
If the output is going to be written to a file, the header and footer are included unless specified otherwise (writing to a file implies creating a standalone document). Otherwise, the header and footer are not included by default and the converted result is returned.
input - the AsciiDoc source as an IO, String or String Array object options - a String, Array or Hash of options to control processing (default: {})
String and Array values are converted into a Hash. See Asciidoctor::Document#initialize for details about options.
Returns the Document object if the converted String is written to a file, otherwise the converted String
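A hedged usage sketch (the source text and option values are illustrative, based on the description above):

  html = Asciidoctor.convert 'Hello, *AsciiDoc*!', :safe => :safe
  # no file is written, so an embeddable fragment (no header/footer) is returned as a String
  page = Asciidoctor.convert 'Hello, *AsciiDoc*!', :safe => :safe, :header_footer => true
  # :header_footer => true yields a standalone document as a String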
Public: Parse the contents of the AsciiDoc source file into an Asciidoctor::Document and convert it to the specified backend format.
input - the String AsciiDoc source filename options - a String, Array or Hash of options to control processing (default: {})
String and Array values are converted into a Hash. See Asciidoctor::Document#initialize for details about options.
Returns the Document object if the converted String is written to a file, otherwise the converted String
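A hedged sketch of typical calls ('sample.adoc' is a hypothetical input file):

  Asciidoctor.convert_file 'sample.adoc'
  # writes sample.html next to the input file and returns the Document object
  Asciidoctor.convert_file 'sample.adoc', :to_dir => 'out', :mkdirs => true, :safe => :safe
  # writes out/sample.html, creating the target directory if needed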
Public: Parse the AsciiDoc source input into a {Document}
Accepts input as an IO (or StringIO), String or String Array object. If the input is a File, information about the file is stored in attributes on the Document object.
input - the AsciiDoc source as an IO, String or Array. options - a String, Array or Hash of options to control processing (default: {})
String and Array values are converted into a Hash. See {Document#initialize} for details about these options.
Returns the Document
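A hedged sketch (the document text is illustrative):

  doc = Asciidoctor.load "= Document Title\n\nSome content.", :safe => :safe
  doc.doctitle  # => 'Document Title'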
Public: Parse the contents of the AsciiDoc source file into an Asciidoctor::Document
Accepts input as an IO, String or String Array object. If the input is a File, information about the file is stored in attributes on the Document.
input - the String AsciiDoc source filename options - a String, Array or Hash of options to control processing (default: {})
String and Array values are converted into a Hash. See Asciidoctor::Document#initialize for details about options.
Returns the Asciidoctor::Document
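A hedged sketch ('sample.adoc' is a hypothetical file):

  doc = Asciidoctor.load_file 'sample.adoc', :safe => :safe
  html = doc.convert  # convert the loaded Document to a String without writing a file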