#!/bin/bash
for i in {"red","pink","purple","deep purple","indigo","blue","light blue","cyan","teal","green","light green","lime","yellow","amber","orange","deep orange","brown","grey","blue grey"}
do
  echo "Setting color to: $i, ${i/ /_}, color_${i/ /_}"
  git checkout color_${i/ /_}
  git pull --rebase origin colors
  git pull --rebase
  sed -ri "s/(primary: ').*'/\1$i'/" mkdocs.yml
  git commit -am "Setting color to: $i"
  git push --set-upstream origin color_${i/ /_}
  git checkout colors
done
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" combinations """
from __future__ import print_function
import itertools
import re

CHARS = ['K', 'B', 'D']
MASK = ''.join(reversed(CHARS))

for i in range(1, len(CHARS)+1):
    for comb in itertools.combinations(CHARS, i):
        REG = "[^%s]" % ''.join(comb)
        print(re.sub(REG, "-", MASK))
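For reference, running the combinations script (the combinations.py filename below is hypothetical) prints one mask per non-empty subset of CHARS = ['K', 'B', 'D'], with positions following MASK = 'DBK':

$ python3 combinations.py   # hypothetical filename
--K
-B-
D--
-BK
D-K
DB-
DBK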
#!/bin/bash
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/dgx.csv -o dgx.csv
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/barbora.csv -o barbora.csv
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/karolina.csv -o karolina.csv
#!/bin/bash
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/barbora.md -o docs.it4i/modules-barbora.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/dgx.md -o docs.it4i/modules-dgx.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/karolina.md -o docs.it4i/modules-karolina.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/cs_aarch64.md -o docs.it4i/modules-cs-aarch64.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/cs_all_avx2.md -o docs.it4i/modules-cs-avx2.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/cs_avx512.md -o docs.it4i/modules-cs-avx512.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/cs_ppc64le.md -o docs.it4i/modules-cs-ppc64le.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/cs_el9_aarch64.md -o docs.it4i/modules-cs-el9-aarch64.md
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/dgx.csv -o scripts/dgx.csv
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/barbora.csv -o scripts/barbora.csv
curl -s https://code.it4i.cz/sccs/it4i-modules/raw/master/karolina.csv -o scripts/karolina.csv
#!/usr/bin/python3
"""
Script to process Markdown files, convert them to MDX,
and add frontmatter based on the first H1 heading.
"""
import os
import re
from pathlib import Path


def process_md_file(md_path):
    """
    Converts a Markdown file (.md) to MDX format (.mdx),
    adds frontmatter with title from H1 heading, and removes the original file.

    Args:
        md_path (Path): Path to the Markdown file.
    """
    try:
        with open(md_path, 'r', encoding='utf-8') as file_handle:
            content = file_handle.read()
    except UnicodeDecodeError:
        print(f"Skipping {md_path} - unable to decode as UTF-8")
        return

    # Remove existing hide-toc frontmatter
    hide_toc_pattern = re.compile(
        r'^---\s*\n\s*hide:\s*\n\s*-\s+toc\s*\n---\s*\n*',
        flags=re.MULTILINE
    )
    content = hide_toc_pattern.sub('', content, count=1)

    # Remove leading empty lines
    content = re.sub(r'^\n+', '', content)

    # Process H1 heading
    h1_pattern = re.compile(r'^\s*#\s+(.*)$', re.MULTILINE)
    match = h1_pattern.search(content)
    if match:
        title = match.group(1).strip()
        frontmatter = f'---\ntitle: "{title}"\n---\n'
        # Construct new content with frontmatter at beginning
        before_h1 = content[:match.start()].lstrip('\n')
        after_h1 = content[match.end():].lstrip('\n')
        new_content = frontmatter + before_h1 + after_h1
    else:
        print(f"No H1 heading found in {md_path}, creating basic frontmatter")
        frontmatter = '---\ntitle: ""\n---\n\n'
        new_content = frontmatter + content.lstrip('\n')

    # Ensure no empty lines after frontmatter
    lines = new_content.split('\n')
    if len(lines) >= 3 and lines[0] == '---' and lines[2] == '---':
        remaining = lines[3:]
        while remaining and not remaining[0].strip():
            remaining.pop(0)
        new_content = '\n'.join(lines[:3] + remaining)

    # Create and write MDX file
    mdx_path = md_path.with_suffix('.mdx')
    with open(mdx_path, 'w', encoding='utf-8') as file_handle:
        file_handle.write(new_content)

    # Remove original MD file
    md_path.unlink()
    print(f"Converted {md_path} to {mdx_path}")


def main():
    """Walks through directories and processes all Markdown files."""
    for root, dirs, files in os.walk('.'):
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        for file in files:
            if file.endswith('.md'):
                md_path = Path(root) / file
                process_md_file(md_path)


if __name__ == '__main__':
    main()
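A minimal usage sketch for the converter above, assuming it is saved as md_to_mdx.py (the filename is an assumption; the script simply walks the current directory, so run it from the root of the Markdown tree to be converted):

# hypothetical filename; converts every *.md below the current directory to .mdx in place
python3 md_to_mdx.py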
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" matrix """
from __future__ import print_function
import itertools

CHARS = ['K', 'B', 'D']
MASK = ''.join(reversed(CHARS))

for bits in itertools.product([0, 1], repeat=len(CHARS)):
    SBIT = "".join(str(bit) for bit in bits)
    NST = ""
    for i, _ in enumerate(SBIT):
        if SBIT[i] == "1":
            NST += MASK[i]
        else:
            NST += "-"
    print(NST)
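For reference, the matrix script (saved here as matrix.py, a hypothetical filename) prints the eight possible cluster masks, in the same order that get_matrix() in the modules_matrix script below builds them:

$ python3 matrix.py   # hypothetical filename
---
--K
-B-
-BK
D--
D-K
DB-
DBK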
#!/bin/bash
find content/docs -type d -exec bash -c '
  cd "{}" || exit
  DIR=$(basename "$PWD")
  TITLE=$(echo "$DIR" | sed -E "s/^(.)/\U\1/")
  PAGES=$( (ls *.mdx 2>/dev/null | sed "s/\.mdx$//"; ls -d */ 2>/dev/null | sed "s#/##") | sort | jq -R . | jq -s . )
  jq -n --arg title "$TITLE" --argjson pages "$PAGES" "{title: \$title, pages: \$pages}" > meta.json
' \;
exit 0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" modules_matrix """
import argparse
import csv
import itertools
import json
import os.path


def arg_parse():
    """
    Argument parser
    """
    parser = argparse.ArgumentParser(
        description="Module_matrix"
    )
    parser.add_argument('--json',
                        action='store_true',
                        help="get json")
    return parser.parse_args()


def get_data(filename):
    """Function to read the data from the input CSV file to use in the analysis"""
    reader = []  # Just in case the file open fails
    with open(filename, 'r') as fdata:
        reader = csv.reader(fdata, delimiter=',')
        return list(reader)  # Only return the reader when you have finished.


def get_datalist():
    """Get data list"""
    datalist = []
    for name in ['karolina', 'dgx', 'barbora']:
        path = os.path.join('scripts', f"{name}.csv")
        datalist += get_data(path)
    return datalist


def get_counts(datalist):
    """Get counts"""
    counts = {}
    for i in datalist:
        counts[i[0]] = counts.get(i[0], 0) + int(i[1])
    return counts


def get_matrix():
    """Get matrix"""
    chars = ['K', 'B', 'D']
    arr = []
    mask = ''.join(reversed(chars))
    for bits in itertools.product([0, 1], repeat=len(chars)):
        sbit = "".join(str(bit) for bit in bits)
        nst = ""
        for i, _ in enumerate(sbit):
            if sbit[i] == "1":
                nst += mask[i]
            else:
                nst += "-"
        arr.append(nst)
    return arr


def get_software(datalist):
    """Get software"""
    matrix = get_matrix()
    counts = get_counts(datalist)
    software = {}
    prev = ''
    for mat, i in sorted(counts.items()):
        split = mat.split('/')
        if len(split) > 1:
            if split[0] != prev:
                software[split[0]] = {}
            software[split[0]][split[1]] = '`' + matrix[i] + '`'
            prev = split[0]
    return software


def packages_json(software):
    """Packages JSON"""
    packages = {}
    for module in sorted(software.items(), key=lambda i: i[0].lower()):
        packages[module[0]] = sorted(module[1])[-1]
    data = {'total': len(packages), 'projects': packages}
    return data


def print_software(software):
    """Print software"""
    for module in sorted(software.items(), key=lambda i: i[0].lower()):
        software_name = module[0]
        versions = []
        clusters = []
        for key in sorted(module[1].keys()):
            versions.append(key)
            clusters.append(module[1][key])
        print(f"| {software_name} | {'<br>'.join(versions)} | {'<br>'.join(clusters)} |")
    print()
    print('---8<--- "modules_matrix_search.md"')
def print_hint():
    """Print hint"""
    print('!!! Hint "Cluster Acronyms"')
    print('    ```')
    print('    D B K')
    print('    | | |')
    print('    | | +----> Karolina')
    print('    | +------> Barbora')
    print('    +--------> DGX')
    print('    ```')
    print()
    print("| Module <br><form><input id=\"searchInput\" placeholder=\"🔍 Filter\""
          " style=\"width: 8rem; border-radius: 0.2rem; color: black;"
          " padding-left: .2rem;\"></form> | Versions | Clusters |")
    print("| ------ | -------- | -------- |")


def print_changelog():
    """Print changelog"""
    print('**Modules Changelog**<br>')
    print('You can see the modules changelog for each supercomputer here:<br>')
    print('[DGX modules changelog][1]<br>')
    print('[Barbora modules changelog][2]<br>')
    print('[Karolina modules changelog][3]<br>')


def print_links():
    """Print links"""
    print('[1]: https://code.it4i.cz/sccs/it4i-modules/-/blob/master/dgx-changelog.md')
    print('[2]: https://code.it4i.cz/sccs/it4i-modules/-/blob/master/barbora-changelog.md')
    print('[3]: https://code.it4i.cz/sccs/it4i-modules/-/blob/master/karolina-changelog.md')


def main():
    """Main function"""
    arg = arg_parse()
    datalist = get_datalist()
    software = get_software(datalist)
    if arg.json:
        print(json.dumps(packages_json(software)))
    else:
        print_changelog()
        print_hint()
        print_software(software)
        print_links()


if __name__ == "__main__":
    main()
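A usage sketch for modules_matrix: it reads scripts/karolina.csv, scripts/dgx.csv, and scripts/barbora.csv (fetched into scripts/ by one of the curl scripts above) and writes either a Markdown availability table or a JSON summary to stdout. The modules_matrix.py filename and the output path are assumptions:

# Markdown table (default)
python3 scripts/modules_matrix.py > docs.it4i/modules-matrix.md
# JSON summary
python3 scripts/modules_matrix.py --json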
#!/bin/bash
find . -type f -not -name '*.md' -print0 | while IFS= read -r -d '' file; do
  # Strip leading ./ from the file path
  relative_path="${file#./}"
  # Create target directory path
  target_dir="../public/it4i/$(dirname "$relative_path")"
  # Create directory structure if it doesn't exist
  mkdir -p "$target_dir"
  # Move the file
  mv "$file" "$target_dir"
done
#!/bin/bash
for i in $(find ../public/it4i -type f -printf "%P\n" | grep -v .gitkeep) ; do
  short=$(echo "$i" | sed -e 's/.*\/\([^\/]*\)\/\([^\/]*\)$/\1\/\2/')
  for f in $(grep -rl "$short)"); do
    sh=$(echo "$short" | sed -e 's/\//\\\//g')
    ie=$(echo "$i" | sed -e 's/\//\\\//g')
    sed -i "s/(.*$sh)/(\/it4i\/$ie)/" "$f"
  done
done
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0301, R1710
""" titlemd """
import argparse
import sys

try:
    from titlecase import titlecase
except ImportError:
    print("Please install titlecase")
    sys.exit(1)


def arg_parse():
    """
    Argument parser
    """
    parser = argparse.ArgumentParser(
        description="Titlemd"
    )
    parser.add_argument('-t', '--test',
                        action='store_true',
                        help="test")
    parser.add_argument('location',
                        nargs='?',
                        default='.',
                        help="location, default current directory")
    return parser.parse_args()
def mkdocs_available(location):
    """ Is mkdocs.yml available? """
    return "mkdocs.yml" in location


def linestart(line, disabled, test, prev_line=None):
    """ linestart """
    if test:
        if (line.startswith("``") or line.startswith("extra:")) and not disabled:
            return True
        if (line.startswith("``") or (prev_line and prev_line.startswith("pages:"))) and disabled:
            return False
    else:
        if line.startswith("``") and not disabled:
            return True
        if line.startswith("``") and disabled:
            return False
    return disabled
def testdata(arg):
    """ test """
    # Spelling exceptions
    with open('.spelling', encoding='utf-8') as fdata:
        spelling = fdata.readlines()

    def abbreviations(word, **_):
        """ abbreviations """
        if word + "\n" in spelling:
            return word

    # Open the file and read the lines as a list
    with open(arg.location, encoding='utf-8') as fdata:
        lines = fdata.readlines()

    # Loop through the list of lines and titlecase
    # any line beginning with '#'.
    return_value = 0
    prev_line = lines[0] if lines else ""
    echo_filename = False
    disabled = mkdocs_available(arg.location)
    for line in lines:
        disabled = linestart(line, disabled, arg.test, prev_line)
        if line.startswith('#') and not disabled and not mkdocs_available(arg.location):
            title_line = titlecase(line[:line.find("]")], callback=abbreviations) + line[line.find("]"):]
            if line != title_line:
                if return_value == 0 and not echo_filename:
                    print(f"{arg.location}")
                    echo_filename = True
                print(f"-{line}", end="")
                print(f"+{title_line}", end="")
                print()
                return_value = 1
        if (line.startswith('---') or line.startswith('===')) and not disabled:
            title_prev_line = titlecase(prev_line[:prev_line.find("]")], callback=abbreviations) + prev_line[prev_line.find("]"):]
            if prev_line != title_prev_line:
                if return_value == 0 and not echo_filename:
                    print(f"{arg.location}")
                    echo_filename = True
                print(f"-{prev_line}", end="")
                print(f"+{title_prev_line}", end="")
                print()
                return_value = 1
        if (mkdocs_available(arg.location) and not line.startswith('#') and not disabled):
            title_line = titlecase(line[:line.find(":")], callback=abbreviations) + line[line.find(":"):]
            if line != title_line:
                if return_value == 0 and not echo_filename:
                    print(f"{arg.location}")
                    echo_filename = True
                print(f"-{line}", end="")
                print(f"+{title_line}", end="")
                print()
                return_value = 1
        prev_line = line
    return return_value
def writedata(arg):
    """ writedata """
    # Spelling exceptions
    with open('.spelling', encoding='utf-8') as fdata:
        spelling = fdata.readlines()

    def abbreviations(word, **_):
        """ abbreviations """
        if word + "\n" in spelling:
            return word

    # Open the file and read the lines as a list
    with open(arg.location, encoding='utf-8') as fdata:
        lines = fdata.readlines()
    with open(arg.location, 'w', encoding='utf-8') as fdata:
        # Write with a one-line lag so a setext underline ('---'/'===')
        # can retitle the heading line that precedes it.
        prev_line = ""
        disabled = False
        for line in lines:
            disabled = linestart(line, disabled, arg.test)
            if line.startswith('#') and not disabled:
                line = titlecase(line[:line.find("]")], callback=abbreviations) + line[line.find("]"):]
            if (line.startswith('---') or line.startswith('===')) and not disabled:
                prev_line = titlecase(prev_line[:prev_line.find("]")], callback=abbreviations) + prev_line[prev_line.find("]"):]
            fdata.write(prev_line)
            prev_line = line
        fdata.write(prev_line)
def main():
    """
    main function
    """
    arg = arg_parse()
    if arg.test:
        sys.exit(testdata(arg))
    else:
        writedata(arg)


if __name__ == "__main__":
    main()
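A usage sketch for titlemd (the file paths are illustrative; the script also expects a .spelling file with title-case exceptions in the working directory): --test only reports headings that are not title-cased and returns a non-zero exit code, while the default mode rewrites the file in place:

# check a single file without modifying it
python3 titlemd.py --test docs.it4i/index.md
# fix headings in place
python3 titlemd.py docs.it4i/index.md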
#!/usr/bin/python3
"""
Script to check internal links and section references in MDX files.
"""
import re
from pathlib import Path


def extract_links(content):
    """
    Extract all internal links from the file.

    :param content: The content of the MDX file.
    :return: A list of internal links.
    """
    link_pattern = re.compile(r'\[.*?\]\((?!http)(.*?)\)')  # Everything except http/https
    return link_pattern.findall(content)


def extract_headers(content):
    """
    Extract all H1-H6 headers for hash reference checks.

    :param content: The content of the MDX file.
    :return: A set of headers formatted as hash links.
    """
    header_pattern = re.compile(r'^(#+)\s*(.*)', re.MULTILINE)
    return {f"#{match[1].lower().replace(' ', '-')}" for match in header_pattern.findall(content)}
def check_internal_links(directory):
    """
    Check the existence of files and hash sections for internal links.

    :param directory: The directory containing MDX files.
    """
    base = Path(directory).resolve()
    # Map each file's path relative to the docs root to its absolute path
    mdx_files = {f.relative_to(base): f for f in base.rglob("*.mdx")}
    file_headers = {}

    # Extract headers from each file
    for mdx_file, path in mdx_files.items():
        with open(path, "r", encoding="utf-8") as file:
            content = file.read()
        file_headers[mdx_file] = extract_headers(content)

    # Check internal links
    for mdx_file, path in mdx_files.items():
        with open(path, "r", encoding="utf-8") as file:
            content = file.read()
        links = extract_links(content)
        for link in links:
            parts = link.split("#")
            file_part = parts[0]
            hash_part = f"#{parts[1]}" if len(parts) > 1 else None

            # Resolve the link target relative to the linking file,
            # so it can be compared against the relative keys in mdx_files
            if file_part:
                try:
                    target = (base / mdx_file).parent.joinpath(file_part).resolve().relative_to(base)
                except ValueError:
                    target = None
            else:
                target = mdx_file

            # Check if the target file exists
            if target is None or target not in mdx_files:
                print(f"❌ Broken file link in {mdx_file}: {link}")
            # Check if the section exists
            elif hash_part and hash_part not in file_headers.get(target, set()):
                print(f"⚠️ Broken section link in {mdx_file}: {link}")


if __name__ == "__main__":
    check_internal_links("content/docs")
#!/usr/bin/python3
"""
Script to check external links in MDX files.
"""
import re
from pathlib import Path

import requests


def check_links_in_mdx(directory):
    """
    Scans MDX files in the given directory for external links and checks their availability.

    :param directory: Path to the directory containing MDX files.
    """
    mdx_files = Path(directory).rglob("*.mdx")
    url_pattern = re.compile(r'\[.*?\]\((http[s]?://.*?)\)')

    for mdx_file in mdx_files:
        with open(mdx_file, "r", encoding="utf-8") as file:
            content = file.read()

        links = url_pattern.findall(content)
        for link in links:
            try:
                response = requests.head(link, allow_redirects=True, timeout=5)
                if response.status_code >= 400:
                    print(
                        f"❌ Broken link in {mdx_file}: {link} "
                        f"(Status: {response.status_code})"
                    )
            except requests.RequestException:
                print(f"⚠️ Error checking {link} in {mdx_file}")


if __name__ == "__main__":
    check_links_in_mdx("content/docs")
$$
MAX\_FAIRSHARE * ( 1 - \frac{usage_{Project}}{usage_{Total}} )
$$
$$
10^8*queue\_priority + 10^7*fairshare\_priority + 10^5*job\_age
$$
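As a purely illustrative calculation (the numbers below are invented, not taken from any queue configuration), a job with queue_priority = 4, fairshare_priority = 0.7, and job_age = 12 would receive

$$
10^8*4 + 10^7*0.7 + 10^5*12 = 408200000
$$

so the queue term dominates, fairshare orders jobs within a queue, and job age acts as the final tie-breaker.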
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
  config: ["MMLorHTML.js"],
  jax: ["input/TeX", "output/HTML-CSS", "output/NativeMML"],
  extensions: ["tex2jax.js", "MathMenu.js", "MathZoom.js"],
  tex2jax: {
    inlineMath: [ ["\\(","\\)"] ],
    displayMath: [ ["\\[","\\]"] ]
  },
  TeX: {
    TagSide: "right",
    TagIndent: ".8em",
    MultLineWidth: "85%",
    equationNumbers: {
      autoNumber: "AMS",
    }
  },
  displayAlign: 'left',
  showProcessingMessages: false,
  messageStyle: 'none'
});
</script>
<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.slim.min.js" integrity="sha256-/SIrNqv8h6QGKDuNoLGA4iret+kyesCkHGzVUUV0shc=" crossorigin="anonymous"></script>
<script>
// override to case insensitive search
$.expr[":"].contains = $.expr.createPseudo(function(arg) {
  return function( elem ) {
    return $(elem).text().toUpperCase().indexOf(arg.toUpperCase()) >= 0;
  };
});

$("#searchInput").keyup(function () {
  // split the current value of searchInput
  var data = this.value.split(" ");
  // create a jquery object of the rows
  var jo = $("tbody").find("tr");
  if (this.value == "") {
    jo.show();
    return;
  }
  // hide all the rows
  jo.hide();

  // Recursively filter the jquery object to get results.
  jo.filter(function (i, v) {
    var $t = $(this);
    for (var d = 0; d < data.length; ++d) {
      if ($t.is(":contains('" + data[d] + "')")) {
        return true;
      }
    }
    return false;
  })
  // show the rows that match.
  .show();
}).focus(function () {
  this.value = "";
  $(this).css({
    "color": "black"
  });
}).css({
  "color": "#C0C0C0"
});
</script>