initial upload

This commit is contained in:
2025-12-17 11:00:57 +08:00
parent 2bc7b24a71
commit a09a73537f
4614 changed files with 3478433 additions and 2 deletions

41
docs/edit_scripts/README.txt Executable file
View File

@@ -0,0 +1,41 @@
Notes for scripts in this directory.
These are very brute force, just walk through all directories and do something.
Output is written to stdout unless the python script is edited to write to a file name.
The root directory is defined in the python script.
All good enough to do some work on the files to fix and reduce files.
Find duplicate .md files and remove old copies
This was done Aug 10 2020 and most were removed.
Most old copies were located in /pages/docs and dir names html.
Most good copies are located in subdirs under /pages/docs/commands or demos
python find_duplicate_files.py
writes find_duplicate_files.out.txt
Find unlinked .md files and either link or remove
Note some files are referenced as .md and some are referenced as .html
python find_unlinked_files.py
writes find_unlinked_files.out.txt
awk '{if ($1 == 0 && $2 == "Total" ) print $0}' find_unlinked_files.out.txt > unlinked.txt
Old scripts originally written to keep track of file status.
Author Nathan Knapp for conversion from html to markdown pages
python get_site_map.py
writes docs/site_list.md and docs/site_map.md
Currently this does not give information about unused files.
It does not give file and path
Formatting is minimal
It looks like pages/new_md
are duplicates Nathan was using but were not used for final pages.
Check these and remove

View File

@@ -0,0 +1,72 @@
# Search for unlinked .md files in all files
# USAGE: python find_duplicate_files.py
# for every file .md search all files for duplicates
# This is just checking .md files, consider other file types
# MUST SET top_dir
# MUST SET file to open and write results
import os, sys, re
# Count how many .md files in the tree under top_dir match searchstring.
# Prints the full path of every match after the first one (i.e. the
# duplicate copies) and returns the total number of matching files.
# NOTE(review): searchstring is a raw filename used as an uncompiled regex,
# so the '.' before 'md' matches any character and the pattern is not
# anchored at the end -- adequate for duplicate hunting, but not exact.
def find_word_walk(top_dir, searchstring):
    count = 0
    for root, drs, fles in os.walk(top_dir):
        for fle in fles:
            # Only consider markdown files ('.md' anywhere in the name).
            if '.md' in fle:
                infile_name = os.path.join(root, fle)
                if re.match(searchstring, fle):
                    count = count + 1
                    # The first hit is usually the file itself; report the rest.
                    if count > 1:
                        print('match %s with file %s' % (searchstring, infile_name))
    return count
# main -----------------------------------------------
# For every .md file in the docs tree, count how many files in the whole
# tree match its name and report the names that occur more than once.
if __name__ == "__main__":
    top_dir = '/project/eesdev/tam/clone/LaGriT/docs/'
    # Write the report to a file instead of stdout.
    sys.stdout = open("find_duplicate_files.out.txt", "w")
    for root, drs, fles in os.walk(top_dir):
        for fle in fles:
            if '.md' in fle:
                searchstring = fle
                itotal = find_word_walk(top_dir, searchstring)
                if itotal > 1:
                    print('%d duplicates with %s: %s' % (itotal, searchstring, root))
    sys.stdout.close()

View File

@@ -0,0 +1,75 @@
# Search for unlinked .md files in all files
# USAGE: python find_unlinked_files.py
# for every file .md search all files for use of that .md or .html
# If none found, report unlinked file
# TODO:
# This will not catch duplicate file.md if file.md is used somewhere
# This is just checking .md in .md files, consider other file types
# MUST SET top_dir
# MUST SET file to open and write results
import os, sys
# Walk every directory under top_dir and count how many .md files contain
# a reference to searchstring (a bare .md filename) or to its .html twin
# (some pages link to the converted .html instead of the .md source).
# Prints each referencing file and the total; returns the reference count.
def find_word_walk(top_dir, searchstring):
    count = 0
    # Root name of the file we are looking for, with an .html extension.
    # NOTE(review): the original derived this from the file being scanned,
    # contradicting its own comment; derived from searchstring as intended.
    froot = os.path.splitext(searchstring)[0]
    fhtml = froot + ".html"
    for root, drs, fles in os.walk(top_dir):
        for fle in fles:
            if '.md' in fle:
                infile_name = os.path.join(root, fle)
                # BUG FIX: the original called infile.read() twice; the second
                # read returned '' so .html references were never counted.
                # Read the file once and test both strings against it.
                with open(infile_name, 'r') as infile:
                    data = infile.read()
                if searchstring in data:
                    print('found %s in file %s' % (searchstring, infile_name))
                    count = count + 1
                if fhtml in data:
                    print('found %s in file %s' % (fhtml, infile_name))
                    count = count + 1
    print('%d found files with %s: ' % (count, searchstring))
    return count
# main -----------------------------------------------
# For every .md file in the docs tree, report how many files reference it
# (as .md or .html); a count of 0 marks an unlinked-page candidate.
if __name__ == "__main__":
    top_dir = '/project/eesdev/tam/clone/LaGriT/docs/'
    # Write the report to a file instead of stdout.
    sys.stdout = open("find_unlinked_files.out.txt", "w")
    for root, drs, fles in os.walk(top_dir):
        for fle in fles:
            if '.md' in fle:
                searchstring = fle
                print('Search files for %s: %s' % (searchstring, root))
                itotal = find_word_walk(top_dir, searchstring)
                print('%d Total files with %s: %s' % (itotal, searchstring, root))
    sys.stdout.close()

View File

@@ -0,0 +1,32 @@
# for every file .md search all files for use of that .md
# If none found, report unlinked file
import os, sys
# Walk every directory under top_dir and report each .md file whose text
# contains searchstring; prints each hit and a final total.
# searchstring was a module global in the original; it is now a parameter
# whose default preserves the only value the script ever used, so the
# existing one-argument call behaves exactly as before. The count is also
# returned for convenience (previously discarded).
def list_in_dr(top_dir, searchstring="FSET.md"):
    count = 0
    for root, drs, fles in os.walk(top_dir):
        for fle in fles:
            if '.md' in fle:
                infile_name = os.path.join(root, fle)
                with open(infile_name, 'r') as infile:
                    if searchstring in infile.read():
                        print('found %s in file %s' % (searchstring, infile_name))
                        count = count + 1
    print('Total files with %s : %d ' % (searchstring, count))
    return count
# main -----------------------------------------------
if __name__ == "__main__":
    # keyword is the filename to look for in every .md page
    searchstring = "FSET.md"
    top_dir = '/project/eesdev/tam/clone/LaGriT/docs/pages/'
    list_in_dr(top_dir)

View File

@@ -0,0 +1,83 @@
import os, sys
# Resolve a bare markdown filename to a full path: scan the list of known
# markdown paths and return the first one containing the name, or None
# when no path matches.
def get_file(link, md_file_list):
    candidates = (path for path in md_file_list if link in path)
    return next(candidates, None)
# Recursively emit the site map starting from root_fle: every known markdown
# file linked from it is written to out_fle (indented 10 '&nbsp; ' per depth
# level) and then expanded in turn, up to max_depth levels.
# Formatting of each entry comes from tag keywords inside the linked page:
#   ~~name~~   no title tags found (crossed out)
#   ***name*** tagged and ok
#   **name**   tagged ok and has a Title
#   *name*     tagged and needs review
# already_linked is shared across recursive calls so each page appears once.
def print_links(root_fle, depth, max_depth, out_fle, md_file_list, already_linked):
    if depth >= max_depth:
        return
    with open(root_fle, 'r') as infile:
        data = infile.read()
    # Collect every known markdown filename mentioned in this page.
    link_list = []
    for fle in [i.split('/')[-1] for i in md_file_list]:
        if fle in data:
            link_list.append(fle)
    indent = ''
    for i in range(depth):
        for j in range(10):
            indent += '&nbsp; '
    link_list = list(set(link_list) - set(already_linked))
    for link in sorted(link_list):
        link_fle = get_file(link, md_file_list)
        with open(link_fle, 'r') as fh:
            link_fle_in = fh.read()
        # BUG FIX: the original tested `'tags' and 'ok' in x`, which Python
        # parses as `'tags' and ('ok' in x)` -- the 'tags' literal is always
        # truthy, so any page containing 'ok' anywhere was formatted as
        # tagged. Test both substrings explicitly, per the comments above.
        # NOTE(review): the elif is paired with the outer if, matching the
        # parallel tag logic in this script's main section -- confirm intent.
        tags = '~~'
        if 'tags' in link_fle_in and 'ok' in link_fle_in:
            tags = '***'
            if 'Title' in link_fle_in:
                tags = '**'
        elif 'tags' in link_fle_in and 'review' in link_fle_in:
            tags = '*'
        rel_link = link_fle.split('LaGriT/docs/')[-1][:-3]
        out_fle.write(indent + '[' + tags + link[:-3] + tags + ']' + '(' + rel_link + ')' + '\n' + '\n')
        already_linked.append(link)
        # Release-note pages link everywhere; do not expand them.
        if 'release' not in link_fle:
            print_links(link_fle, depth + 1, max_depth, out_fle, md_file_list, already_linked)
# CHANGE THIS LINE TO LOCATION OF REPO:
# repo_location = '/Users/nknapp/Desktop/LaGriT/docs/'
if __name__ == "__main__":
    repo_location = '/project/eesdev/tam/clone/LaGriT/docs/'
    dr = repo_location + 'pages'
    # Get a list of all the Markdown files in the repository.
    md_file_list = []
    for root, drs, fles in os.walk(dr):
        for fle in fles:
            if '.md' in fle:
                md_file_list.append(os.path.join(root, fle))
    # Print out the website map using the recursive function above.
    max_depth = 4
    out_fle = open(repo_location + 'site_map.md', 'w')
    out_fle.write('[**Home**](index.md)' + '\n' + '\n')
    already_linked = []
    print_links(repo_location + 'index.md', 0, max_depth, out_fle, md_file_list, already_linked)
    out_fle.close()
    # List all files in the website; format based on tags in each file.
    out = open(repo_location + 'site_list.md', 'w')
    for fle in sorted(md_file_list):
        with open(fle, 'r') as fh:
            link_fle_in = fh.read()
        # BUG FIX: `'tags' and 'ok' in x` parses as `'tags' and ('ok' in x)`;
        # test both substrings explicitly (same fix as in print_links).
        tags = '~~'
        if 'tags' in link_fle_in and 'ok' in link_fle_in:
            tags = '**'
        elif 'tags' in link_fle_in and 'review' in link_fle_in:
            tags = '*'
        rel_link = fle.split('LaGriT/docs/')[-1][:-3]
        out.write('[' + tags + fle.split('/')[-1][:-3] + tags + ']' + '(' + rel_link + ')' + '\n' + '\n')
    out.close()

View File

@@ -0,0 +1,27 @@
# Loop through all source files and find images that are missing minimum formatting
import os, sys
# Print infile_name if any line in it looks like a broken image reference:
# either a line mentioning both 'assets' and 'docs' (an absolute repo path
# that breaks once deployed), or an 'img' mention not wrapped in an HTML
# <...> tag. Only the first hit per file is reported.
def filter_file(infile_name):
    with open(infile_name, 'r') as infile:
        for line in infile:
            if 'assets' in line and 'docs' in line:
                print(infile_name)
                break
            if 'img' in line and not ('<' in line and '>' in line):
                print(infile_name)
                break
# Walk dr and run the broken-image filter on every markdown file found.
def list_in_dr(dr):
    print('broken images in these files')
    for root, drs, fles in os.walk(dr):
        for fle in fles:
            if '.md' in fle:
                filter_file(os.path.join(root, fle))
if __name__ == "__main__":
    dr = '/project/eesdev/tam/clone/LaGriT/docs/pages/'
    list_in_dr(dr)

View File

@@ -0,0 +1,23 @@
# list all files that are supposed to have tables in them - useful for finding broken tables
import os, sys
# Print infile_name (plus a separator line) if it appears to contain a
# markdown table: a line holding both a '|' cell delimiter and a '---'
# header rule. Only the first such line per file triggers the report.
def filter_file(infile_name):
    with open(infile_name, 'r') as infile:
        for line in infile:
            if '|' in line and '---' in line:
                print(infile_name)
                print('--------')
                break
# Walk dr and run the table filter on every markdown file found.
def list_in_dr(dr):
    for root, drs, fles in os.walk(dr):
        for fle in fles:
            if '.md' in fle:
                filter_file(os.path.join(root, fle))
if __name__ == "__main__":
    dr = '/home/nknapp/deknapp.github.io/pages/'
    list_in_dr(dr)

26
docs/edit_scripts/replace.py Executable file
View File

@@ -0,0 +1,26 @@
# replace or remove text and formatting that is not wanted in website
import os, sys
# Rewrite infile_name in place, replacing every occurrence of `no` with
# `yes`: the new content is written to a sibling *_temp.md file which then
# replaces the original.
def filter_file(infile_name, no, yes):
    outfile_name = infile_name[:-3] + '_temp.md'
    with open(infile_name, 'r') as infile, open(outfile_name, 'w') as outfile:
        data = infile.read()
        outfile.write(data.replace(no, yes))
    # BUG FIX: the original shelled out to `mv` with unquoted paths, which
    # breaks on filenames containing spaces and is a shell-injection hazard;
    # os.rename performs the same move (overwriting the destination on
    # POSIX, matching mv) without spawning a subprocess.
    os.rename(outfile_name, infile_name)
# Apply the old -> new text replacement to every .md file under dr.
def replace_in_dr(old, new, dr):
    for root, drs, fles in os.walk(dr):
        for fle in fles:
            if '.md' in fle:
                filter_file(os.path.join(root, fle), old, new)
if __name__ == "__main__":
    dr = '/Users/nknapp/Desktop/LaGriT/docs/pages/'
    # FIX: the committed file had bare `no =` / `yes =` lines, which are a
    # SyntaxError -- the values were evidently stripped. Fill them in before
    # running; empty defaults keep the module importable.
    no = ''   # TODO: text to search for
    yes = ''  # TODO: replacement text
    replace_in_dr(no, yes, dr)