Confluence

Let's look at the executeFileUpload method:

public String executeFileUpload() throws Exception {
    MultiPartRequestWrapper request = (MultiPartRequestWrapper)this._request; // Struts multipart request wrapper
    File file = null;
    String fileName = null;
    try {
      ArrayList errors = (ArrayList)request.getErrors();
      String maxDocSize;
      if (errors != null) {
        for (int i = 0; i < errors.size(); i++) {
          Matcher m = CONTENT_LENGTH_MESSAGE_PATTERN.matcher((String)errors.get(i));
          if (m.matches()) {
            maxDocSize = FileSize.format(Long.parseLong(m.group(2)));
            throw new DocumentToLargeException("Total document size is too large. Reduce total document size to below " + maxDocSize);
          }
        }
      } // reject the upload when the total size exceeds the configured limit
      Maybe<Message> xsrfValidationResult = this.xsrfTokenService.validate(request);
      if (xsrfValidationResult.isDefined()) {
        Message xsrfValidationErrorMessage = (Message)xsrfValidationResult.get();
        super.addActionError(getText(xsrfValidationErrorMessage.getKey(), xsrfValidationErrorMessage.getArguments()));
        return "error";
      } // XSRF token validation
      file = request.getFiles("filename")[0];
      fileName = request.getFileNames("filename")[0];
      this._pageTitle = fileName;
      if (fileName.lastIndexOf('.') != -1) {
        this._pageTitle = fileName.substring(0, fileName.lastIndexOf('.'));
      } // derive the page title from the file name
      log.debug("Import word document [ {} ] ", fileName);
      byte[] docBuf = buildDocumentTree(file, this._pageTitle); // build the document tree; covered in detail below
      this._importInfo.setTreeDepth(this._treeDepth);
      this._session.put("wordImportFile", docBuf);
      this._session.put("wordImportBookmarks", this._bookmarks);
      this._session.put("wordImportNodes", this._orderedNodes);
      this._session.put("wordImportRoot", this._treeRoot); // stash the document state in the session
    }
    catch (InvalidDocumentException e) {
      super.addActionError("The selected file is not a valid binary Word 97-2003 document");
      return "error";
    }
    catch (DocumentToLargeException e) {
      super.addActionError(e.getMessage());
      return "error";
    }
    catch (Exception localException) {} // other exceptions are silently swallowed
    finally {
      if ((file != null) && (!file.delete())) {
        log.warn("Failed to delete uploaded file " + file.getAbsolutePath());
      }
    }
    if ((file == null) || (fileName == null)) {
      return "error";
    }
    String submitType = this._request.getParameter("submit");
    if ((submitType != null) && (submitType.equals("Import"))) {
      this._importInfo.setTitle(getPage().getTitle());
      this._importInfo.setConflict(0);
      this._importInfo.setLvl(0);
      this._importInfo.setImportSpace(false);
      // record the import settings (target space, title, etc.)
      return executeFileImport();
    }
    this._importInfo.setTitle(this._pageTitle);
    return "input";
  }
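
A quick aside on the size check above: the wrapper reports upload errors as plain strings, and the action extracts the configured maximum by regex. Below is a minimal, self-contained sketch of that idea; the error-message wording and the regex are assumptions for illustration, not Confluence's actual CONTENT_LENGTH_MESSAGE_PATTERN, and plain arithmetic stands in for FileSize.format.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SizeErrorSketch {
    // Assumed shape of the multipart-upload error message; the real
    // CONTENT_LENGTH_MESSAGE_PATTERN in Confluence may differ.
    private static final Pattern CONTENT_LENGTH_MESSAGE_PATTERN =
        Pattern.compile(".*size \\((\\d+)\\) exceeds the configured maximum \\((\\d+)\\).*");

    public static void main(String[] args) {
        String error = "the request was rejected because its size (20971520) "
                     + "exceeds the configured maximum (10485760)";
        Matcher m = CONTENT_LENGTH_MESSAGE_PATTERN.matcher(error);
        if (m.matches()) {
            // group(2) is the configured maximum, which the real action formats
            // with FileSize.format() before throwing DocumentToLargeException
            long maxBytes = Long.parseLong(m.group(2));
            System.out.println("Upload limit is " + (maxBytes / (1024 * 1024)) + " MB");
        }
    }
}

In the real code, a match triggers DocumentToLargeException, which the catch block turns into an action error.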

We can see that it ultimately jumps into the executeFileImport method:

public String executeFileImport() throws Exception {
    byte[] docBuf = (byte[])this._session.get("wordImportFile"); // the uploaded Word document bytes
    this._bookmarks = ((BookmarkInfo)this._session.get("wordImportBookmarks"));
    this._orderedNodes = ((ArrayList)this._session.get("wordImportNodes"));
    this._treeRoot = ((DocumentTreeNode)this._session.get("wordImportRoot"));
    this._pagesBeingDeleted = new ArrayList();
    this._pagesBeingOverwritten = new ArrayList();
    this._pagesBeingCreated = new ArrayList();
    // restore the bookmarks, ordered nodes and tree root from the session; the three lists
    // above keep track of pages when the Word document is split across multiple pages
    User currentUser = AuthenticatedUserThreadLocal.getUser();
    boolean failFast = permissionChecks(currentUser);
    if (failFast) {
      return "error";
    }
    boolean showWarning = shouldShowWarning();
    if (showWarning) {
      return "warning";
    }
    if (!isValidTitle()) {
      return "error";
    }
    return doFileImport(); // after the permission and title checks, hand off to doFileImport
  }

The doFileImport method (it is long, so only a few fragments are shown):

    byte[] docBuf = (byte[])this._session.get("wordImportFile");
    this._bookmarks = ((BookmarkInfo)this._session.get("wordImportBookmarks")); // bookmarks (used when inserting images and other content)
    this._orderedNodes = ((ArrayList)this._session.get("wordImportNodes")); // ordered node list
    this._treeRoot = ((DocumentTreeNode)this._session.get("wordImportRoot")); // document tree root
    this._pagesBeingDeleted = new ArrayList();

    Page oldPage = this._treeRoot.getOldPage();
    Page rootPage;
    if (oldPage != null) {
      rootPage = oldPage;
      oldPage = (Page)rootPage.clone();
      this._treeRoot.setOldPage(oldPage);
    }
    else {
      rootPage = new Page();
      rootPage.setSpace(getSpace());
      Page homePage = super.getSpace().getHomePage();
      if (homePage != null) {
        homePage.addChild(rootPage);
      }
      rootPage.setCreator(currentUser);
    }
    rootPage.setTitle(this._treeRoot.getText());
    this._importInfo.setTitle(this._treeRoot.getText());
    // decide whether to overwrite an existing page or create a new one; a new page is added as a child of the space home page

    try {
      if (...) { // split the document into multiple child pages (condition elided in this excerpt)
        Document doc = createNewDocument(docBuf);
        Doc2Wiki converter = new DocSplitter(new DefaultSplitImportContext(rootPage, this._treeRoot.getOldPage(), currentUser, this.pageManager, this._attachmentManager, this._bookmarks, this._orderedNodes, this._importInfo.getLvl(), this.ocSettingsManager, this.wikiMarkupToXhtmlConverter), this.ocSettingsManager.isDoFootnotes());
        doc.accept(converter);
      }
      else { // import everything into a single page
        StringBuffer out = new StringBuffer();
        Document doc = createNewDocument(docBuf);
        BookmarkCollection bkmks = doc.getRange().getBookmarks();
        Doc2Wiki converter = new Doc2Wiki(out, new DefaultImportContext(this.pageManager, rootPage, this._attachmentManager, bkmks, this.ocSettingsManager), true, this.ocSettingsManager.isDoFootnotes());
        doc.accept(converter);
        rootPage.setBodyAsString(this.wikiMarkupToXhtmlConverter.convertToXhtml(out.toString()));
        if (this._treeRoot.getOldPage() == null) {
          this.pageManager.saveContentEntity(rootPage, null);
        } else {
          this.pageManager.saveContentEntity(rootPage, this._treeRoot.getOldPage(), null);
        }
      }
    } // the converter turns the Word document into markup containing HTML tags, which is then saved to the database (MySQL)
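
The comments above mention splitting the Word document across multiple Confluence pages, which is what the DocSplitter branch handles. Purely to illustrate the idea (the types below are hypothetical stand-ins, not Confluence's DocSplitter, DocumentTreeNode or Page), a heading-driven split could look like this:

import java.util.ArrayList;
import java.util.List;

public class SplitSketch {
    // Hypothetical stand-in for a Confluence page: a title, a body, child pages.
    static class SimplePage {
        final String title;
        final StringBuilder body = new StringBuilder();
        final List<SimplePage> children = new ArrayList<>();
        SimplePage(String title) { this.title = title; }
    }

    // Every level-1 heading starts a new child page; everything else becomes body text.
    static SimplePage split(String rootTitle, List<String[]> blocks) {
        SimplePage root = new SimplePage(rootTitle);
        SimplePage current = root;
        for (String[] block : blocks) {
            String kind = block[0];
            String text = block[1];
            if ("h1".equals(kind)) {
                current = new SimplePage(text);
                root.children.add(current);
            } else {
                current.body.append(text).append('\n');
            }
        }
        return root;
    }

    public static void main(String[] args) {
        List<String[]> blocks = new ArrayList<>();
        blocks.add(new String[] { "h1", "Introduction" });
        blocks.add(new String[] { "p", "Some intro text" });
        blocks.add(new String[] { "h1", "Details" });
        blocks.add(new String[] { "p", "More text" });
        SimplePage root = split("Imported document", blocks);
        System.out.println(root.children.size() + " child pages created");
    }
}

In Confluence, the equivalent bookkeeping is what _pagesBeingCreated, _pagesBeingOverwritten and _pagesBeingDeleted track in executeFileImport.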

After the import succeeds, a redirect request is issued:

[Screenshot: the redirect request after a successful import]

Locating viewaction, jd-gui ran into trouble and could not recover the source code. A quick search suggested the class had been packed, so I switched to another decompiler, Luyten, and this time the source came out fine.

[Screenshot: the viewaction source decompiled with Luyten]

The HTML stored in the database is combined with the HTML of the front-end page into a single page, saved in the string pagexhtmlcontent, and then forwarded to the front end.

[Screenshot: the code that builds pagexhtmlcontent]

The HTML is written to the response and the page is rendered. That is the entire Word import flow.
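
As a rough sketch of that final rendering step, the snippet below concatenates a stored XHTML body with the surrounding page markup into one string, roughly what the decompiled viewaction does when it builds pagexhtmlcontent; the class and method names here are invented for illustration only.

public class PageRenderSketch {
    static String renderPage(String pageTitle, String storedXhtmlBody) {
        // pagexhtmlcontent in the decompiled viewaction corresponds to a
        // combined string like the one returned here
        return "<html><head><title>" + pageTitle + "</title></head>"
             + "<body><div class=\"wiki-content\">" + storedXhtmlBody + "</div></body></html>";
    }

    public static void main(String[] args) {
        System.out.println(renderPage("My imported page", "<p>Imported from Word</p>"));
    }
}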

Now let's look at how Confluence parses the Word document.

private byte[] buildDocumentTree(final File file, final String pageTitle) throws Exception {
    this._treeRoot = new DocumentTreeNode(pageTitle, 0);
    final DocumentTreeBuilder builder = new DocumentTreeBuilder(this._treeRoot); // create the document tree builder
    final FileInputStream in = new FileInputStream(file);
    final ByteArrayOutputStream out = new ByteArrayOutputStream((int)file.length());
    final byte[] buf = new byte[4096];
    for (int read = in.read(buf); read != -1; read = in.read(buf)) {
        out.write(buf, 0, read);
    } // copy the uploaded Word file into the in-memory output stream
    final byte[] docBuf = out.toByteArray(); // the document as a byte array
    file.delete();
    Document doc = null;
    try {
        doc = this.createNewDocument(docBuf); // build the Document from the byte array
    }
    catch (Throwable t) {
        throw new InvalidDocumentException();
    }
    doc.accept(builder);
    this._orderedNodes = new ArrayList();
    this.normalizeDocTree(this._treeRoot);
    this._bookmarks = builder.getBookmarks();
    return docBuf;
}

Document construction here relies on the Aspose library (although Confluence has modified Aspose.Words). Checking the API documentation, the Document class has the following constructors:

[Screenshot: the Document class constructors from the Aspose API documentation]

Confluence uses the last of these constructors.
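
For reference, here is a minimal sketch using the stock Aspose.Words for Java API (Document(InputStream), accept(DocumentVisitor), VisitorAction.CONTINUE); Confluence's modified copy of the library may differ, and the file name is just an example.

import com.aspose.words.Document;
import com.aspose.words.DocumentVisitor;
import com.aspose.words.Paragraph;
import com.aspose.words.VisitorAction;
import java.io.ByteArrayInputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

public class AsposeLoadSketch {
    public static void main(String[] args) throws Exception {
        // Read the uploaded .doc into a byte array, as buildDocumentTree does.
        byte[] docBuf = Files.readAllBytes(Paths.get("upload.doc"));

        // Build the Document from an in-memory stream; Confluence's
        // createNewDocument(docBuf) presumably wraps a constructor like this.
        Document doc = new Document(new ByteArrayInputStream(docBuf));

        // Walk the document with a visitor, the same mechanism DocumentTreeBuilder plugs into.
        doc.accept(new DocumentVisitor() {
            @Override
            public int visitParagraphEnd(Paragraph paragraph) {
                // A real tree builder would record headings and bookmarks here.
                return VisitorAction.CONTINUE;
            }
        });
    }
}

Passing a ByteArrayInputStream matches how buildDocumentTree already holds the whole upload in docBuf before calling createNewDocument.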
