Background: omitted.
Goal: filter scripts out of the data carried by incoming requests.
Technologies: Filter, Jsoup, RequestWrapper.
POST / PUT / DELETE: the parameters may come from a form submission, or, if @RequestBody is used, they arrive as JSON in the request body stream.
GET / OPTIONS, etc.: the parameters may sit in the URL query string or in the pre-request of a form submission, so in practice they can show up anywhere you can think of, including the headers.
2.1 First, pull the parameters out of the request from every location that needs filtering.
2.2 Then run the extracted parameters through the filter logic.
2.3 Re-wrap the filtered parameters into a request and pass it down the chain.
2.4 Along the way we need to prepare a Jsoup utility class that does the actual script filtering; a custom filter with matching rules added to it (e.g. which URLs are exempt and which request methods must be filtered); and configuration for that filter: whether it is enabled, where it sits in the filter chain, and the whitelist or blacklist it uses. So it is clear which classes and configuration we need:
a Filter
a RequestWrapper
a Jsoup utility class
a Filter configuration class
2.5 Test with real data.
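As one way to run that test once the classes from section 3 below are in place, a throwaway echo endpoint is enough. The controller below is only an illustration and is not part of the original project; its class name, paths and parameter name are made up.

// Hypothetical controller used only to exercise the XSS filter.
import org.springframework.web.bind.annotation.*;

@RestController
@RequestMapping("/xss-demo")
public class XssDemoController {

    // Query/form parameters pass through getParameter()/getParameterMap() of the wrapper
    @GetMapping("/echo")
    public String echoParam(@RequestParam("text") String text) {
        return text;
    }

    // A JSON body read via @RequestBody passes through getInputStream() of the wrapper
    @PostMapping("/echo")
    public String echoBody(@RequestBody String body) {
        return body;
    }
}

Sending <script>alert(0);</script>hello as the text parameter, or posting it inside a JSON body, should come back with the script element stripped if the filter is registered correctly.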
3.1 Jsoup dependency:
<!-- screen XSS -->
<dependency>
    <groupId>org.jsoup</groupId>
    <artifactId>jsoup</artifactId>
    <version>1.9.2</version>
</dependency>
3.2 Jsoup utility class: JsoupUtil
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.safety.Whitelist;

import java.io.FileNotFoundException;
import java.io.IOException;

/**
 * @author: qianshanmuxue
 * @date: 2019/2/27 19:32
 * @description: XSS illegal label filtering
 */
public class JsoupUtil {

    // Jsoup ships several built-in whitelists; each one allows a different set of tags.
    // Ctrl+click simpleText() to see the comments and tag lists in the Jsoup source.
    private static final Whitelist whitelist = Whitelist.simpleText();

    // Keep the original formatting instead of pretty-printing the cleaned output
    private static final Document.OutputSettings outputSettings = new Document.OutputSettings().prettyPrint(false);

    static {
        // Add our own tags/attributes to the whitelist; everything outside the whitelist is filtered out
        whitelist.addAttributes(":all", "style").addTags("p").addTags("strong");
        // With this setting, when a tag has no closing partner only that tag is filtered,
        // instead of everything after it being stripped as well.
        // (I struggled with this for a long time: with a single unpaired <script> tag,
        // all the data following it was being filtered away.)
        whitelist.preserveRelativeLinks(true);
    }

    // the filtering method
    public static String clean(String content) {
        return Jsoup.clean(content, "", whitelist, outputSettings);
    }

    // test main
    public static void main(String[] args) throws FileNotFoundException, IOException {
        String text = "<a href=\"http://www.baidu.com/a\" onclick=\"alert(1);\"><strong><p>sss</p></strong></a><script>alert(0);</script>sss";
        System.out.println(clean(text));
    }
}
3.3 Request wrapper class: XssHttpServletRequestWrapper
import java.io.*;
import java.util.*;
import javax.servlet.ReadListener;
import javax.servlet.ServletInputStream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import com.xxx.utils.JsoupUtil;
import org.jsoup.nodes.Document;
import org.springframework.util.StringUtils;

/**
 * @author: qianshanmuxue
 * @date: 2019/2/27 16:24
 * @description: request wrapper used to get the request parameters and request body data and wrap a new request
 */
public class XssHttpServletRequestWrapper extends HttpServletRequestWrapper {

    // We need to read data out of the request, so we extend HttpServletRequestWrapper
    // and override the methods that expose the parameters in each location.
    private HttpServletRequest orgRequest = null;
    private static final Document.OutputSettings outputSettings = new Document.OutputSettings().prettyPrint(false);

    public XssHttpServletRequestWrapper(HttpServletRequest request) {
        super(request);
        orgRequest = request;
    }

    @Override
    public ServletInputStream getInputStream() throws IOException {
        BufferedReader br = new BufferedReader(new InputStreamReader(orgRequest.getInputStream()));
        String line = br.readLine();
        String result = "";
        if (line != null) {
            result += clean(line);
        }
        return new WrappedServletInputStream(new ByteArrayInputStream(result.getBytes()));
    }

    @Override
    public String getParameter(String name) {
        if (("content".equals(name) || name.endsWith("WithHtml"))) {
            return super.getParameter(name);
        }
        name = clean(name);
        String value = super.getParameter(name);
        if (!StringUtils.isEmpty(value)) {
            value = clean(value);
        }
        return value;
    }

    @Override
    public Map getParameterMap() {
        Map map = super.getParameterMap();
        // cleaned copy of the parameter map
        Map<String, String> returnMap = new HashMap<String, String>();
        Iterator entries = map.entrySet().iterator();
        Map.Entry entry;
        String name = "";
        String value = "";
        while (entries.hasNext()) {
            entry = (Map.Entry) entries.next();
            name = (String) entry.getKey();
            Object valueObj = entry.getValue();
            if (null == valueObj) {
                value = "";
            } else if (valueObj instanceof String[]) {
                String[] values = (String[]) valueObj;
                value = "";
                for (int i = 0; i < values.length; i++) {
                    value += values[i] + ",";   // join multi-value parameters with commas
                }
                value = value.substring(0, value.length() - 1);
            } else {
                value = valueObj.toString();
            }
            returnMap.put(name, clean(value).trim());
        }
        return returnMap;
    }

    @Override
    public String[] getParameterValues(String name) {
        String[] arr = super.getParameterValues(name);
        if (arr != null) {
            for (int i = 0; i < arr.length; i++) {
                arr[i] = clean(arr[i]);
            }
        }
        return arr;
    }

    /**
     * get the original request
     */
    public HttpServletRequest getOrgRequest() {
        return orgRequest;
    }

    /**
     * unwrap the request
     */
    public static HttpServletRequest getOrgRequest(HttpServletRequest req) {
        if (req instanceof XssHttpServletRequestWrapper) {
            return ((XssHttpServletRequestWrapper) req).getOrgRequest();
        }
        return req;
    }

    public String clean(String content) {
        String result = JsoupUtil.clean(content);
        return result;
    }

    private class WrappedServletInputStream extends ServletInputStream {

        private InputStream stream;

        public WrappedServletInputStream(InputStream stream) {
            this.stream = stream;
        }

        public void setStream(InputStream stream) {
            this.stream = stream;
        }

        @Override
        public int read() throws IOException {
            return stream.read();
        }

        @Override
        public boolean isFinished() {
            return true;
        }

        @Override
        public boolean isReady() {
            return true;
        }

        @Override
        public void setReadListener(ReadListener readListener) {
        }
    }
}
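The analysis at the top also lists headers as a possible carrier, but the wrapper above leaves them untouched. If headers need cleaning too, overriding getHeader()/getHeaders() in the same class is one option; the fragment below is my own minimal sketch, not part of the original code, and it reuses the class's existing java.util.* import and clean() method.

// Possible additions to XssHttpServletRequestWrapper (sketch): clean header values as well.
@Override
public String getHeader(String name) {
    String value = super.getHeader(name);
    return value == null ? null : clean(value);
}

@Override
public Enumeration<String> getHeaders(String name) {
    List<String> cleaned = new ArrayList<>();
    Enumeration<String> values = super.getHeaders(name);
    while (values != null && values.hasMoreElements()) {
        cleaned.add(clean(values.nextElement()));
    }
    return Collections.enumeration(cleaned);
}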
3.4 Filter: XssFilter
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang.StringUtils;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

/**
 * @author: qianshanmuxue
 * @date: 2019/2/27 16:25
 * @description:
 */
//@WebFilter
//@Component  -- not needed here, because the filter is registered later in the config class;
//               here we only need to implement the Filter interface and its methods
public class XssFilter implements Filter {

    // receives a config parameter that decides whether this filter is enabled
    private static boolean IS_INCLUDE_RICH_TEXT = false;

    // receives a config parameter listing the URLs that do NOT need filtering
    // (you can also change handleExcludeUrl() so that the list means "only these URLs are filtered")
    public List<String> excludes = new ArrayList<String>();

    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        HttpServletRequest req = (HttpServletRequest) request;
        HttpServletResponse resp = (HttpServletResponse) response;
        if (handleExcludeUrl(req, resp)) {
            chain.doFilter(request, response);
            return;
        }
        XssHttpServletRequestWrapper xssRequest = new XssHttpServletRequestWrapper((HttpServletRequest) request);
        chain.doFilter(xssRequest, response);
    }

    /**
     * Decides whether the current URL should be filtered.
     * The match here is by URL rather than by request method (POST/PUT...), because URL matching
     * fits this project better (GET and the other request methods also need filtering).
     * If you prefer, you can change this method to match on the request method instead.
     */
    private boolean handleExcludeUrl(HttpServletRequest request, HttpServletResponse response) {
        if ((excludes == null || excludes.isEmpty()) && IS_INCLUDE_RICH_TEXT) {
            return false;
        }
        String url = request.getServletPath();
        for (String pattern : excludes) {
            Pattern p = Pattern.compile("^" + pattern);
            Matcher m = p.matcher(url);
            if (m.find()) {
                return true;
            }
        }
        return false;
    }

    /**
     * Filter initialization: read the parameters from the config class to initialize
     * the two fields (whether the filter is enabled, and the list of excluded URLs).
     */
    @Override
    public void init(FilterConfig arg0) throws ServletException {
        String isIncludeRichText = arg0.getInitParameter("isIncludeRichText");
        if (StringUtils.isNotBlank(isIncludeRichText)) {
            IS_INCLUDE_RICH_TEXT = BooleanUtils.toBoolean(isIncludeRichText);
        }
        String temp = arg0.getInitParameter("excludes");
        if (temp != null) {
            String[] url = temp.split(",");
            for (int i = 0; url != null && i < url.length; i++) {
                excludes.add(url[i]);
            }
        }
    }

    @Override
    public void destroy() {
    }
}
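If you do want to match on the request method, as the comment in handleExcludeUrl() suggests, one possible shape is the helper below. This is my own sketch, not code from the article; it assumes that only POST/PUT/DELETE bodies need cleaning in your project, and it would additionally need java.util.Set, java.util.HashSet and java.util.Arrays imports in XssFilter.

// Sketch: decide by HTTP method instead of (or in addition to) the URL.
private static final Set<String> FILTERED_METHODS =
        new HashSet<>(Arrays.asList("POST", "PUT", "DELETE"));

private boolean shouldFilterByMethod(HttpServletRequest request) {
    // e.g. call this from doFilter(): only wrap the request for the listed methods
    return FILTERED_METHODS.contains(request.getMethod().toUpperCase());
}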
3.5 Filter configuration class: XssConfig
import com.xxx.filter.XssFilter;
import com.google.common.collect.Maps;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.Map;

/**
 * @author: qianshanmuxue
 * @date: 2019/2/27 16:49
 * @description: XSS filter config
 */
@Configuration
public class XssConfig {

    @Bean
    public FilterRegistrationBean xssFilterRegistrationBean() {
        FilterRegistrationBean filterRegistrationBean = new FilterRegistrationBean();
        filterRegistrationBean.setFilter(new XssFilter());
        filterRegistrationBean.setOrder(1);              // filter order: set it first
        filterRegistrationBean.setEnabled(true);
        filterRegistrationBean.addUrlPatterns("/*");     // apply the filter to all URL mappings
        Map<String, String> initParameters = Maps.newHashMap();
        initParameters.put("excludes", "/oauth/token");  // whitelisted URLs
        initParameters.put("isIncludeRichText", "true"); // enable or disable
        filterRegistrationBean.setInitParameters(initParameters);
        return filterRegistrationBean;
    }
}
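If you would rather not hard-code the switch and the exclude list, they can come from application.properties. The variant below is my own sketch, not from the original article, and it assumes two invented property names, xss.enabled and xss.excludes.

import com.xxx.filter.XssFilter;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.HashMap;
import java.util.Map;

// Sketch: same registration as above, but driven by (assumed) properties:
//   xss.enabled=true
//   xss.excludes=/oauth/token
@Configuration
public class XssConfig {

    @Value("${xss.enabled:true}")
    private String enabled;

    @Value("${xss.excludes:/oauth/token}")
    private String excludes;

    @Bean
    public FilterRegistrationBean xssFilterRegistrationBean() {
        FilterRegistrationBean bean = new FilterRegistrationBean();
        bean.setFilter(new XssFilter());
        bean.setOrder(1);
        bean.setEnabled(true);
        bean.addUrlPatterns("/*");
        Map<String, String> initParameters = new HashMap<>();
        initParameters.put("excludes", excludes);
        initParameters.put("isIncludeRichText", enabled);
        bean.setInitParameters(initParameters);
        return bean;
    }
}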
Debug screenshots (the request, the program in the debugger, and the run result) are omitted here.
You can see that the script in the request body has been filtered out.
I won't post the rest of the screenshots. Another approach is to escape the characters inside the filter instead of stripping them.
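For that escaping idea, Spring already ships org.springframework.web.util.HtmlUtils, so the wrapper's clean() could delegate to it instead of Jsoup. The class below is my own minimal sketch of that variant, not the article's implementation.

import org.springframework.web.util.HtmlUtils;

// Sketch of the alternative: escape markup instead of stripping it, so
// "<script>" is stored and echoed back as "&lt;script&gt;".
public final class EscapeUtil {

    private EscapeUtil() {
    }

    public static String clean(String content) {
        return content == null ? null : HtmlUtils.htmlEscape(content);
    }
}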
Thanks to luckpet for the tips:
1. The BufferedReader needs to be closed after use;
2. If you test with tools such as Postman, a pasted JSON body may contain line breaks, so result += clean(line); only handles the first line and needs to become a loop over every line:
while ((line = br.readLine()) != null) {
    result += clean(line);
}
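Putting both tips together, getInputStream() in XssHttpServletRequestWrapper could look roughly like the sketch below; it reuses the orgRequest field, clean() and WrappedServletInputStream from the class in section 3.3.

@Override
public ServletInputStream getInputStream() throws IOException {
    StringBuilder result = new StringBuilder();
    // try-with-resources closes the BufferedReader (tip 1);
    // the loop reads every line of a multi-line JSON body (tip 2)
    try (BufferedReader br = new BufferedReader(new InputStreamReader(orgRequest.getInputStream()))) {
        String line;
        while ((line = br.readLine()) != null) {
            result.append(clean(line));
        }
    }
    return new WrappedServletInputStream(new ByteArrayInputStream(result.toString().getBytes()));
}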
A while ago, after our project went through the national security test, I opened one of its pages and an XSS alert popped up out of nowhere. I searched the whole codebase and couldn't find alert("xss") anywhere. I asked the project manager, and it turned out the testers had injected it while adding data during the anti-injection checks. I was completely baffled at first.
I looked into it; I had never considered this problem in earlier projects. If a piece of script gets saved, it will be executed whenever that data is read back, and the consequences can be serious: imagine it popping dialogs outside the page or running a crypto-mining script. That's no small thing. Lesson learned.
<dependency>
    <groupId>org.jsoup</groupId>
    <artifactId>jsoup</artifactId>
    <version>1.11.3</version>
</dependency>
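One version note: starting with jsoup 1.14.1, the Whitelist class was deprecated in favour of Safelist. The utility class in section 3.2 compiles as-is against 1.9.2 or 1.11.3, but on a newer jsoup it would look roughly like this sketch of mine:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.safety.Safelist;   // replaces Whitelist on jsoup 1.14.1+

// Sketch of JsoupUtil on a newer jsoup; same behaviour, only the whitelist type changes.
public class JsoupUtilNew {

    private static final Safelist safelist = Safelist.simpleText()
            .addAttributes(":all", "style")
            .addTags("p", "strong")
            .preserveRelativeLinks(true);

    private static final Document.OutputSettings outputSettings =
            new Document.OutputSettings().prettyPrint(false);

    public static String clean(String content) {
        return Jsoup.clean(content, "", safelist, outputSettings);
    }
}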
The above is my personal experience; I hope it gives you a useful reference.