« Auto-éditer un wikilivre/addappendix/reconstruction et tests du script addappendix » : différence entre les versions

→‎script addapendix.sh : WL:RD : * diverses retouches
(→‎script addapendix.sh : WL:RD : * diverses retouches)
#H Created : 220113 by GC
#H Updated : 220221 by GC for page ScliC
#O Organizational chart
#O -------------------------------
#P Programmers notes
#P -------------------------------
#P NOTE(review): this text is a wiki-diff extract of the 'addappendix' script;
#P many control-flow keywords (then/fi/do/done) and whole lines are missing
#P because the extraction kept only changed lines — as shown it will not parse.
#P Comment markers used throughout: #H header, #O outline/flow, #P programmer
#P notes, #T test/debug probes (commented out).
#P . gettext for translation (sources the gettext.sh helper functions)
. gettext.sh
#O Script begin here
#O If parameters is empty
#P NOTE(review): $1 is unquoted — 'test -z $1' misparses if $1 contains spaces.
#P The matching 'then ... fi' lines are missing from this extract.
if test -z $1
#O Then print the short syntax and exit -1 (switch terminal color to red)
echo -n -e "\033[31m"
#O If first parameter is '?'
if [ "$1" = "?" ]
#O Then print syntax with examples and exit 0 (switch terminal color to green)
echo -n -e "\033[32m"
exit 0
#O IF first parameter is "--v"
if [ "$1" = "--v" ]
#O Then print addappendix version
echo -n -e "\033[32m"
#O *** First parameter analysis ***
#T echo "$1"
#O Test if the first parameter points to wikibooks.org/wiki
#P NOTE(review): 'exit -1' presumably belongs to a lost else/negated branch;
#P as written it would exit exactly when the URL DOES match.
if echo $1 | grep wikibooks.org/wiki
exit -1
#O Check if the page $1 exists on the server (wget --spider probes without downloading)
if wget --spider $1 2>/dev/null; then
echo -n -e "\033[32m"
#O Find the bookname: last '/'-separated field of the URL
echo $1 | awk -F"/" ' { print $NF }' > bookname.txt
read Bookname<bookname.txt
echo; echo -n -e "\033[1;32m"
echo -n $"Book name : $Bookname"
echo -e "\033[0m"
#O Site name: third '/'-separated field of the URL (the host, e.g. fr.wikibooks.org)
echo $1 | awk -F"/" ' { print $3 }' > site.txt
read Site<site.txt
echo; echo -n -e "\033[1;32m"
echo -n $"Site name : $Site"
echo -e "\033[0m"
echo -e "\033[0m"
#O Create Bookname directory
install -d ~/Add_appendix/books/$Bookname
#O Create temp directory in Workdir
#P NOTE(review): $Workdir is never assigned in this extract — TODO confirm it
#P is initialized in the missing lines.
mkdir -p $Workdir/temp
#O ============================================================================
##O Create the file bookname.suffix
#T ***********************
#O Create $Projectdir/resources/TMP to download into
#P NOTE(review): $Projectdir, $Filename and $Suffix are never assigned in this
#P extract — TODO confirm they are set in the missing lines.
mkdir -p $Projectdir/resources/TMP
#O Download $1
cd $Projectdir/resources/TMP
rm -Rf $Projectdir/resources/TMP/* 2> /dev/null
#O Guard: refuse to continue with the placeholder file name
if [ "$Filename" = "filename.txt" ]; then echo $"line 113: \$Filename = filename.txt error, exit -1"; exit -1; fi
rm ../filename.txt
#O go up in the directory resources and rename 'TMP' '$Filename'
cd ..
if test -e $Filename; then rm -R $Filename; fi
mv TMP $Filename
cd $Filename
#T ls -al
#O Extract the '<li><a href=' lines (the chapter list) from the downloaded page
cat $Filename|grep "<li><a href=">extract-li
#O Turn each title attribute into a '[[...]]' wiki link, dropping categories and divs
cat extract-li | sed "s/title=\"/\n[[/g" | grep -v "<li><a href=" |sed "s/\">/]]\n/g"|grep -v "</a>\|Cat\|<div" >extract-li1
#O Concatenate the extracted pieces into $Bookname.$Suffix
#P NOTE(review): extract-dd1 is never created in this extract — presumably a
#P '<dd>' extraction step was lost in the diff.
cat extract-dd1 > $Bookname.$Suffix
cat extract-li1 >> $Bookname.$Suffix
#T echo "$Bookname.$Suffix = "
cp $Bookname.$Suffix $Projectdir/$Bookname.$Suffix
#T ***********************
if test -e $Projectdir/$Bookname.$Suffix
cat $Projectdir/$Bookname.$Suffix
echo "Dowload $1"
#O ============================================================================
#O Download the book in html form
#O Download the site recursively with infinite depth ( -linf ), \
#O convert the links for local browsing ( -k ), \
#O fetch every file needed to render an HTML page properly ( -p ) \
#O and rename all HTML pages with the .html extension ( -E )
echo; echo "Dowload $1"
wget -r -linf -k -p -E "$1" -o $Workdir/temp/wget-log-télécharger.txt
echo "$(gettext '" Found Compiled page : ')$Projectdir/$Bookname.compiled"; echo
echo " Create $Projectdir/$Bookname.list with :"; echo " $Projectdir/$Bookname.compiled"; echo
#O Clean the compiled page with the sed script content_cleaner.dat
cat "$Projectdir/$Bookname.compiled" | sed -f $Datasdir/content_cleaner.dat > $Projectdir/$Bookname.compiled.cleaned
#O Turn the '[[...]]' wiki links into absolute https URLs, one per line
cat "$Projectdir/$Bookname.compiled.cleaned" | grep -v '=' | sed "s/\[\[/https:\/\/$Site\/wiki\//g" | sed "s/\]\]//g" | grep "wiki" | tr ' ' '_' | cut -d '|' -f1 > $Projectdir/$Bookname.list
#T Print $Projectdir/$Bookname.prj
cat $Projectdir/$Bookname.prj
#O Download the complete book structure in project directory
cd $Projectdir
echo $"download all sub-directories of the book '$Bookname'"
wget -r -linf -k -p -E -i $Bookname.list -o $Workdir/temp/wget-log-download.txt
echo "----------"
#T Testspoint exit 0
#O Move the html pages into working sub-directories to document the pages and sub pages
#O create a local list to the downloaded directories $ Projectdir/$1.locale.list
echo "create the complete concatenated hierarchy of the directories of the book '$Bookname'"
#O Strip the https:// prefixes, escape spaces, join all lines with ','
cat $Projectdir/$Bookname.list | sed "s/https:\/\///g" | sed "s/\ /\\\ /g" | tr '\n' ',' > $Projectdir/$Bookname.locale.list
cat $Projectdir/$Bookname.locale.list
echo "----------"
#O Copy the html files to respective directories
#O Create a file with the pagename $Projectdir/$Bookname.mainPage
echo $"Create the page from the local link to the main page, 'the book'"
cat $Projectdir/$Bookname.locale.list | sed "s/ /\\ /g" | cut -d ',' -f1 > $Projectdir/$Bookname.mainPage
echo "----------"
#O Initialize the variable $mainPage
read mainPage < $Projectdir/$Bookname.mainPage
echo "variable mainPage = $mainPage"
#T cat $Projet/$1.mainPage | awk -F"/" '{print NF}' > nbchamps
#T read NbChamps < nbchamps
#T echo "Variable NbChamps = $NbChamps"
echo "----------"
#O Create a file of the working directories to be created.
ls "$mainPage" | sed "s/.html//g" > $Projectdir/$Bookname.dirs
echo "sub-working-diectories : "
cat $Projectdir/$Bookname.dirs
echo "----------"
#O Copy the html pages and subpages in the respective directories
#P NOTE(review): the 'do ... done' body of this loop was lost in the diff;
#P only the cleanup of the temporary 'source'/'destination' files remains.
while read line
rm source ; rm destination
#O ============================================================================
#O Create variable PageSclt
#P NOTE(review): $PageSclt is never assigned in this extract — TODO confirm.
#O File creation '$Bookname.sclt' and print the contents.
echo "----------"
echo $"$(gettext '= Appendix = ')" > $PageSclt
echo >> $PageSclt
echo $"== References == " >> $PageSclt
#O Add <references />
echo $"$(gettext '== References<references ==/> ')" >> $PageSclt
echo "$(gettext '<references /> ')" >> $PageSclt
echo >> $PageSclt
echo "<div style='page-break-before:always'></div>" >> $PageSclt
echo $"== Contents == " >> $PageSclt
#O Add the link to printable book and to articles.
echo "$(gettext '== Contents == ')" >> $PageSclt
echo "<div style='font-zize:85%'>" >> $PageSclt
#O One article URL per paragraph (newlines doubled via the '%' placeholder)
cat $Projectdir/$Bookname.list | tr ' ' '_' | tr '\n' '%' | sed "s/%/\n\n/g" >> $PageSclt
#O Add the link to the source of this edition.
echo $"$(gettext '=== Source for this edition === ')" >> $PageSclt
echo "<div style='font-zize:85%';>" >> $PageSclt
echo -n "https://" >> $PageSclt
cat $Projectdir/$Bookname.mainPage | sed "s/\\\ /_/g" >> $PageSclt
#P other version : cat $Projectdir/Bookname".list" | tr ' ' '_' | tr '\n' '%' | sed "s/%/%\n/g" | grep $1% | tr -d % >> $PageSclt
echo "</div>" >> $PageSclt
echo " " >> $PageSclt
echo "<div style='page-break-before:always'></div>" >> $PageSclt
echo $"== Articles Sources, and contributors == " >> $PageSclt
#O Create section 'Article', 'Source', 'License', 'Contributors(?)'
echo "$(gettext '== Articles Sources, and contributors == ')" >> $PageSclt
#O add the text : style PediaPress or personalized.
#O The ''sources'' listed for each article provide more detailed licensing
#O information including the copyright status, the copyleft owner and the license conditions.
echo -n "<span style='font-zize:85%';>" >> $PageSclt
echo $"$(gettext 'The ''sources'' listed for each article provide more detailled licencing information including the copyright status, the copyleft owner and the license conditions..</span> ')" >> $PageSclt
#O or, validate one or the other of these texts :
# echo $"The texts are available with their respective licenses, however other terms may apply.<br />See the terms of use for more details : <br />https://wikimediafoundation.org/wiki/Conditions_d'utilisation.</span>" >> $PageSclt
echo " " >> $PageSclt
echo "----------"
#O Create or recreate the list-file $Projectdir/$1.pj
#O (one article name per line: last '/'-separated field of each URL)
cat $Projectdir/$Bookname.list | awk -F"/" '{ print $NF }' > $Projectdir/$Bookname.pj
echo "Pjlist : "$PjList
#O While exist line in file $PjList ,
#P NOTE(review): the echo above uses $PjList but the loop reads $Pjlist
#P (different capitalization, see 'done <' below) — TODO confirm which is set.
while read line
#O Print the line read,
echo $"$(gettext ' line read = ')"$line"
#O Extract and copy all strings from the html file
#O $line.html in the file $line.str and add to screen
#T pwd
#P NOTE(review): 'mkd' is an external extraction tool; presumably '-pws'
#P pulls the quoted strings out of the html page — TODO confirm its flags.
mkd -pws '**' "$line/$line.html" $Projectdir/$line/$line.tmp | tr ',' '\n' > $Projectdir/$line/$line.str
#T break
#O Create the documentation file of pages :
echo "*** References : articles, src, lic, contrib. "
#O Print article,
if [ $line != $Bookname ]
cat $Projectdir/$line/$line.article
echo -n $", ''source :'' https://"$Site"/w/index.php?oldid=" > $Projectdir/$line/$line.RevisionId
#O Print source,
#P NOTE(review): this overwrites the .RevisionId file written just above.
echo -n "$(gettext ', ''source :'' ')https://"$Site"/w/index.php?oldid=" > $Projectdir/$line/$line.RevisionId
#O Append the page's wgRevisionId (first match in the extracted strings)
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e wgRevisionId | tr -d ':' | sed "s/\"/%/g" | cut -d'%' -f3 >> $Projectdir/$line/$line.RevisionId
if [ "$line" != "$Bookname" ]
#P page-footer license (sample of the French footer HTML):
#P <li id="footer-info-copyright">Les textes sont disponibles sous <a href="https://creativecommons.org/licenses/by-sa/3.0/">license Creative Commons attribution partage à l’identique</a> ; d’autres termes peuvent s’appliquer.<br/>
#P Voyez les <a href="https://wikimediafoundation.org/wiki/Conditions_d'utilisation">termes d’utilisation</a> pour plus de détails.<br/></li>
#P Print license :
#P <link rel="license" href="https://creativecommons.org/licenses/by-sa/3.0/"/>
#T echo -n $", ''Copyrightlicense :'' " >> ArticleUn$Projectdir/$line/$line.tmplicense
#T cat ArticleUn.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | cut -d'%' -f2 |sed "s/\/\//https:\/\//g" >> ArticleUn.tmp
#O Print license :
echo -n "$(gettext ', ''license :'' ')" > $Projectdir/$line/$line.license
#T cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | cut -d'%' -f4 >> $Projectdir/$line/$line.license
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | tr '"' '%' | cut -d'%' -f4 >> $Projectdir/$line/$line.license
#T cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e license | sed "s/\"\//%\//g" | cut -d'%' -f2 | sed "s/\/\//https:\/\//g" >> $Projectdir/$line/$line.license
if [ $line != $Bookname ]
cat $Projectdir/$line/$line.license
#P special case for the French page footer:
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e footer-info-copyright | sed "s/\"\//%\//g" | tr '"' '%' | cut -d'%' -f4 > $Projectdir/$line/$line.license
echo -n $", ''author : '' " > $Projectdir/$line/$line.author
#O Author(s).
echo -n "$(gettext ', ''author : '' ')" > $Projectdir/$line/$line.author
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e wgRelevantUserName | sed "s/\"/%/g" | cut -d'%' -f4 > tmp
if test -s tmp
#O If the xtools article-info URL answers, reference it for contributor details
if wget --spider https://xtools.wmflabs.org/articleinfo/en.wikibooks.org/$line 2>/dev/null
echo $"$(gettext '. -see :') https://xtools.wmflabs.org/articleinfo/$Sitename/$line" >> $Projectdir/$line/$line.author
elif wget --spider https://xtools.wmflabs.org/articleinfo/$Sitename/$Bookname/$line 2>/dev/null
echo $"$(gettext '. -see :') https://xtools.wmflabs.org/articleinfo/$Sitename/$Bookname/$line" >> $Projectdir/$line/$line.author
cat $Projectdir/$line/$line.str | grep -n -m 1 -i -e wgRelevantPageName | sed "s/\"/%/g" | cut -d'%' -f4 > tmp
if test -s tmp
#T echo -n $"&action=. -see ''contributors'' in book, history page of " >> tmp$Projectdir/$line/$line.author
echo -n "$(gettext '. -see ''contributors'' in book, history page of ')" >> $Projectdir/$line/$line.author
cat tmp >> $Projectdir/$line/$line.author; rm tmp
echo >> $PageSclt
#O end of while.
done < $Pjlist
#P Creation of the page Bookname.scli (sources, contributors, licenses of the images)
#O Initialize the header variable for the scli files.
#P NOTE(review): $Headscli and $PageSclic are never assigned in this extract —
#P TODO confirm they are initialized in the missing lines.
echo > $Headscli
#O Print the Headscli file name to the console
#O Show Headscli filename to console
echo -n "$Headscli"; echo $" english version"; echo
echo $"== Images sources licenses and contributors ==" > $Headscli
echo -n $"<span style='font-size:85%'>"; echo $"The ''sources'' listed for each illustration provide more detailed licensing information, including copyright status, the holders of these rights and the license conditions.</span>" >> $Headscli
echo " " >> $Headscli
echo "<div style='font-size:72%'>" >> $Headscli
echo >> $Headscli
#T Show the content of file Headscli cat $Headscli; exit 0
#O ============================================================================
#O If the file $Projectdir/$Bookname/$Bookname.str exist, create the PageSclic in classic order
#O If the page $Projectdir/$Bookname/$Bookname.str exists, create the page $PageSclic holding the images in a classic order
if test -e $Projectdir/$Bookname/$Bookname.str
#O Select lines containing 'fichier:', 'file', image and create bookname.files
cat $Projectdir/$Bookname/$Bookname.str | grep -n -i -e fichier: -e file: -e image: > $Projectdir/$Bookname/$Bookname.files
#O Select lines containing 'fichier:', '.jpg', '.png', '.gif' and create bookname.pict
cat $Projectdir/$Bookname/$Bookname.str | grep -n -i -e fichier: -e .jpg -e .png -e .gif > $Projectdir/$Bookname/$Bookname.picts
#O Select in bookname.files the lines containing 'title', remove the </div> tags, cut on ">" and keep the last field to create bookname.illustrations
cat $Projectdir/$Bookname/$Bookname.files | grep title |sed "s/<\/div>//g" | awk -F">" '{print $NF}' > $Projectdir/$Bookname/$Bookname.illustrations
#O In the .files file, with separator '=', print every field followed by a newline,
#O keep the lines containing 'https', replace the character '"' with '!'
#O and select the third field; then re-select the lines containing https,
#O replace '>' with '!', remove '</a', remove the remaining '!' characters
#O and create the .links file
cat $Projectdir/$Bookname/$Bookname.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https | sed "s/\"/!/g" | cut -d '!' -f3 \
| grep https | tr '>' ! | sed "s/<\/a//g" |sed "s/!//g" > $Projectdir/$Bookname/$Bookname.links
#OF Download the files listed in the bookname.links file
#O Download the files contained in the list of the bookname.links file
wget -P $Projectdir/$Bookname -r -linf -k -p -E -i $Projectdir/$Bookname/$Bookname.links
#O Copy html files from ./commons.wikimedia.org/wiki into the current directory
cd $Bookname
if test -e commons.wikimedia.org; then cp -R commons.wikimedia.org/wiki/*.html . ; fi
#O html.list initialization
echo -n "" > html.list
if test -s $Projectdir/$Bookname/$Bookname.links
echo $"$Projectdir/$Bookname/$Bookname.links is not empty"
#OF As long as lines are read from the .links file, read the images and list them in html.list
#O As long as there is a line in file html.links, read the line and copy it to html.list
#P NOTE(review): the body of this first 'while' was lost in the diff extraction.
while read line
echo " *** References : image, src, lic, contrib."
#O As long as there is a line in file html.list extract illustrations, sources, licenses, authors(s)
while read line
echo $"$(gettext '**** line = ')$line ****"
cat $line.tmp | tr ',' '\n' > $line.str
#O Illustration title: the page's wgTitle value
echo -n $"'''$(gettext 'Illustration : ')'''" > $line.title
cat $line.str |grep wgTitle | cut -d '"' -f4 >> $line.title
cat $line.title >> $PageSclic
cat $line.title
#O Source: an index.php?title= link rebuilt from the local html file name
echo -n $", ''$(gettext ' source : ')''https://"$Site"/w/index.php?title= " > $line.source
echo $line | sed "s/.html//g" >> $line.source
cat $line.source >> $PageSclic
cat $line.source
#O License: short license name extracted from the licensetpl_short markup
echo -n $", ''$(gettext ' license : ')''" > $line.license
cat $line.str | grep licensetpl_short | sed "s/<td>//g" | sed "s/<span class//g" | sed "s/<\/span>//g" | sed "s/style=\"display:none;\"//g" | tr '=' '\n' | grep licensetpl_short | awk -F">" '{print $NF}' >> $line.license
cat $line.license >> $PageSclic
cat $line.license
#O Author: first user/author link following the 'Author' row of the description table
echo -n $", ''$(gettext ' author : ')''" > $line.authors
rm tmp
cat $line.str | grep -i -n -m1 -A 1 -e Author | grep -i -e user -e utilisteur -e auteur | tr '/' '\n' | grep -i -e user -e utilisteur -e auteur | cut -d '"' -f1 > tmp
echo >> $PageSclic
done < html.list
#P bottom of the page, before the new page
echo "</div>" >> $PageSclic
#T echo "$(gettext '{{Newpage}} ')" >> $PageSclic
echo "<div style='page-break-before:always'></div>" >> $PageSclic
#O end of test -e $Projectdir/$Bookname/$Bookname.str
#P NOTE(review): the red error message below presumably belongs to a missing
#P 'else' branch of the test above.
echo -e "\033[31m"
echo $"Can not create $Projectdir/$Bookname/$Bookname.sclic. URL page of book is not found"
echo -e "\033[0m"
#O end of create PageSclic
#O ============================================================================
#O Create variable Pagesclipco
#P NOTE(review): $Pagesclipco is never assigned in this extract — TODO confirm.
echo $Pagesclipco
#O Wikibooks sclipco personalized page initialization with the title 'Images sources, etc.
cat $Projectdir/$Bookname.scli > $Pagesclipco
cat $Pagesclipco
#O ============================================================================
#O Create an identification loop of the directories corresponding to the articles
#O As long as we can read the lines of the file $Projectdir/$Bookname.pj
while read pjline
#O If the line read is not $Bookname (name of the book)
#T echo "line read : " $line
if [ $pjline != $Bookname ]
#O Then:
#O Enter in the article directory,
cd $Projectdir/$pjline
#O Create image documentation files
#O Open the stream of $ Projectdir/$Bookname/$Bookname.str of the image and select
#O the character strings containing: File:, Image; and put in files
#O $Projectdir/$line/$line/.files, .pict, .illustration, .images, .links
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e Fichier: -e file: -e image: | sed -f $RepCom/$Conversions > $Projectdir/$pjline/$pjline.files
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e fichier: -e .jpg -e .png -e .gif | sed -f $RepCom/$Conversions> $Projectdir/$pjline/$pjline.picts
cat $Projectdir/$pjline/$pjline.files | grep title |sed "s/<\/div>//g" | awk -F">" '{print $NF}' > $Projectdir/$pjline/$pjline.illustrations
#T cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https | sed "s/\"/!/g" | cut -d '!' -f3 | grep https | tr '>' ! | sed "s/<\/a//g" |sed "s/!//g" > $Projectdir/$pjline/$pjline.links
cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https://$Site | sed "s/\"/!/g" | cut -d '!' -f2 > $Projectdir/$pjline/$pjline.images
#O Transform the links of the image file on wikibooks into an image file on commons
cat $Projectdir/$pjline/$pjline.images | sed "s/$Site/commons.wikimedia.org/g"| sed "s/Fichier/File/g" > $Projectdir/$pjline/$pjline.commonsimages
#O Download the image files from the wikimedia server.
#P Note: the -N option allows you to avoid downloading an up-to-date file,
#P and without adding a numbering.
#T #T wget -N -P $Projectdir/$pjline -i $Projectdir/$pjline/$pjline.images
wget -P $Projectdir/$pjline -r -linf -k -p -E -i $Projectdir/$pjline/$pjline.commonsimages
#T echo "*** Commonsimages ***"; cat $Projectdir/$pjline/$pjline.commonsimages; exit 0
#O Copy the downloaded images to the directory of the current article..
cp $Projectdir/$pjline/commons.wikimedia.org/wiki/*.html $Projectdir/$pjline/.
#O Initialize the commonshtml.list file with empty text.
echo -n "" > commonshtml.doublons
#O List the image files in the order of printing or display,
#O using the list $Projectdir/$pjline/$pjline.commons.images
#O As long as we can read lines in $Projectdir/$pjline/$pictline.images
while read pictline
#O Cut the lines at carriage return, select the last field and add '.html'
#echo $pictline | awk -F"/" '{for (i=1;i<=NF;i+=2) print $i "\n"}' #| cut -d '%' -f1 | cut -d '.' -f1 > tmp
echo $pictline | awk -F"/" '{ print $NF".html"}' >> commonshtml.doublons
#O Cut the duplicated lines and select even fields.
echo -n "" > commonshtml.list
# print "Fin"
#P NOTE(review): the line below is the tail of an awk invocation whose opening
#P part was lost in the diff extraction.
} ' commonshtml.doublons
#O End of while $Projectdir/$pjline/$pjline.commonsimages
done < $Projectdir/$pjline/$pjline.commonsimages
#T Show html.list
#T echo "*** commonshtml.list ***"; cat commonshtml.listexit 0
#O Copy article name in file $Bookname.sclipco
echo "'''Article : $pjline'''<br />" >> $Pagesclipco
echo "'''Article : $pjline'''"
#P## Appendix, 'wikimedia commons' version ##############################
#O As long as there are (local) links in the commonshtml.list image file
while read htmlline
#O Print the line read,
echo ""
echo ""
echo $"$(gettext ' ---- line read = $htmlline --- ')"
echo ""
#O With mkd (software), select the character strings from the image file $htmlline
#O and copy them to $ htmlline.co.str after replacing the character ',' with
#O 'new-line'
mkd -pw '**' $htmlline $htmlline.tmp
cat $htmlline.tmp | tr ',' '\n' > $htmlline.co.str
#T echo "***-n $htmlline.co.str"'''Illustration : ***'''"; cat> $htmlline.co.str; exit 0 title
#O images,
echo -n "'''$(gettext ' Illustration : ')'''" > $htmlline.co.title
cat $htmlline.co.str | grep wgTitle | cut -d '"' -f4 >> $htmlline.co.title
cat $htmlline.co.title >> $Pagesclipco
cat $htmlline.co.title
#T echo "*** $htmlline.co.title : ***"; cat $htmlline.co.title; exit 0
#O source,
echo -n $", ''source : ''https://commons.wikimedia.org/wiki/" > $htmlline.co.source
##echo -n ",''$(gettext 'source : ')''https://commons.wikimedia.org/wiki/" > $htmlline.co.source
echo -n $htmlline | sed "s/.html//g" | sed "s/.str//g" >> $htmlline.co.source
#P NOTE(review): the 'then' branch below has no visible 'fi' — lost in the diff.
if [ "$Site" = "fr.wikibooks.org" ]; then echo "?uselang=fr" >> $htmlline.co.source
cat $htmlline.co.source >> $Pagesclipco
cat $htmlline.co.source
#T echo "***-n $htmlline.co.source", ''license : ***'' "; cat> $htmlline.co.source; exit 0license
#O license,
echo -n ", ''$(gettext 'license : ')'' " > $htmlline.co.license
cat $htmlline.co.str | grep licensetpl_short | sed "s/<td>//g" | sed "s/<span class//g" | sed "s/<\/span>//g" | sed "s/style=\"display:none;\"//g" | tr '=' '\n' | grep licensetpl_short | awk -F">" '{print $NF}' >> $htmlline.co.license
cat $htmlline.co.license >> $Pagesclipco
cat $htmlline.co.license
#T echo "*** $htmlline.co.license : ***"; cat $htmlline.co.license; exit 0
#O authors.
rm -rf tmp
#Pwww echo -n $", ''$(gettext ' author : ')'' " > $htmlline.co.authors
#echoTest -ncat $",tmp ''author : ''" > $htmlline.co.authors"; exit -1
echo -n ", ''author : ''" > $htmlline.co.authors
#Test cat tmp; echo "$htmlline.co.authors"; exit -1
cat $htmlline.co.str | grep -i -n -m1 -A 1 -e Author -e Auteur | tr '/' '\n' | grep -i -e user -e utilisteur -e auteur -e author | cut -d '"' -f1 | grep -i -e user -e utilisteur -e auteur -e author > tmp
if test -s tmp; then echo ; else echo "-" > tmp; fi
cat $htmlline.co.authors >> $Pagesclipco
cat $htmlline.co.authors
#O Finish the page $Pagesclipco
echo "" >> $Pagesclipco
#O End of as long as there are lines in commonshtml.list
done < commonshtml.list
#O End of 'if the line is not the name of book'.
#O End of while line in $Bookname.pj
done < $Projectdir/$Bookname.pj
#O clean intermediate files
rm -rf tmp
#O End of page $Pagesclipco
echo "</div>" >> $Pagesclipco
#T echo "$(gettext ' {{Newpage}} ')" >> $Pagesclipco
echo "<div style='page-break-before:always'></div>" >> $Pagesclipco
#O ============================================================================
#O Creating Bookname.appendix: concatenate the text part (.sclt) and the
#O images part (.sclipco) into the final appendix file
cat $Projectdir/$Bookname.sclt>$Projectdir/$Bookname.appendix
cat $Projectdir/$Bookname.sclipco>>$Projectdir/$Bookname.appendix
#O ============================================================================
#O Display file Bookname.appendix
echo;echo -e "\033[1;32mcopy and paste the text displayed and add after the book $Bookname.\033[0m"
cat $Projectdir/$Bookname.appendix
exit 0
#O addappendix script end