function
rule: prefix + timestr + sequence number
-- Generate the next form number for a table type:
-- 2-char prefix + 14-char timestamp + 4-digit sequence = CHAR(20).
DROP FUNCTION IF EXISTS nextNo;
delimiter ;;
CREATE FUNCTION nextNo(tableType CHAR(20)) RETURNS CHAR(20)
READS SQL DATA
COMMENT '表单号'
BEGIN
    -- All DECLAREs must come first in a MySQL BEGIN...END block.
    DECLARE prefix CHAR(2);
    DECLARE timestr CHAR(14);
    DECLARE lastval INT;

    -- Map the logical table type to its two-letter prefix.
    SET prefix = CASE tableType
        WHEN 'role' THEN 'ro'
    END;

    -- 14-character timestamp, e.g. 20240101123059.
    -- %i is minutes; %m (month) here would repeat the month digits.
    SET timestr = DATE_FORMAT(NOW(), '%Y%m%d%H%i%s');

    -- Last issued 4-digit sequence for this table type (NULL when none).
    SET lastval = CASE tableType
        WHEN 'role' THEN (SELECT RIGHT(MAX(role_no), 4) FROM `bs_role`)
    END;

    -- No previous number: start the sequence at 0 so the first result is 0001.
    IF lastval IS NULL THEN
        SET lastval = 0;
    END IF;

    -- MySQL string concatenation is CONCAT(); '+' would do numeric addition.
    RETURN CONCAT(prefix, timestr, LPAD(lastval + 1, 4, '0'));
END
;;
delimiter ;
SELECT nextNo('role');
procedure
rule: timestr + ordersn
-- Daily sequence number generator: date string + zero-padded counter.
DROP PROCEDURE IF EXISTS usp_seqnum;
delimiter ;;
CREATE PROCEDURE usp_seqnum()
MODIFIES SQL DATA
COMMENT '序列号'
BEGIN
    -- 定义变量并获取相关值
    DECLARE v_cnt INT;
    -- Date key, e.g. '20240101'; CHAR(8) matches im_orderseq.timestr
    -- (the original INT forced string/number comparisons).
    DECLARE v_timestr CHAR(8);
    SET v_timestr = DATE_FORMAT(NOW(), '%Y%m%d');
    -- Random step of 1..101 so consecutive numbers are hard to guess.
    SELECT ROUND(RAND() * 100, 0) + 1 INTO v_cnt;

    -- Keep the sequence table across calls: DROP + CREATE here would
    -- reset the counter on every invocation and defeat the sequence.
    CREATE TABLE IF NOT EXISTS im_orderseq(
        timestr NVARCHAR(8) NOT NULL,
        ordersn INT
    );

    START TRANSACTION;
    -- Bump today's counter; when today has no row yet, insert the seed.
    UPDATE im_orderseq SET ordersn = ordersn + v_cnt WHERE timestr = v_timestr;
    IF ROW_COUNT() = 0 THEN
        INSERT INTO im_orderseq(timestr, ordersn) VALUES(v_timestr, v_cnt);
    END IF;
    -- Pad with the character '0' (the bare 0 relied on implicit casting).
    SELECT CONCAT(v_timestr, LPAD(ordersn, 7, '0')) AS ordersn
    FROM im_orderseq WHERE timestr = v_timestr;
    COMMIT;
END;
;;
delimiter ;
CALL usp_seqnum();
SELECT * FROM im_orderseq;
function
or PROCEDURE
first, initialize table structure and insert demo data.
-- Demo tree: id/pid adjacency list with a sibling sort order.
DROP TABLE IF EXISTS `tree_node`;
CREATE TABLE `tree_node`(
    `id` INT COMMENT '节点ID',  -- original comment '登录日志ID' was a copy-paste error
    `pid` INT NULL COMMENT '父节点ID,根节点为0',
    `name` VARCHAR(5),
    `sort` INT NULL COMMENT '同级排序',
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='树形结构表';
-- Sample data: nodes A and G are roots (pid = 0).
INSERT INTO tree_node VALUES (1, 0, 'A', 0);
INSERT INTO tree_node VALUES (7, 0, 'G', 2);
INSERT INTO tree_node VALUES (2, 1, 'B', 3);
INSERT INTO tree_node VALUES (3, 1, 'C', 4);
INSERT INTO tree_node VALUES (4, 1, 'D', 5);
INSERT INTO tree_node VALUES (5, 4, 'E', 6);
INSERT INTO tree_node VALUES (6, 4, 'F', 7);
INSERT INTO tree_node VALUES (8, 7, 'H', 8);
INSERT INTO tree_node VALUES (9, 7, 'I', 9);
INSERT INTO tree_node VALUES (10, 7, 'J', 10);
INSERT INTO tree_node VALUES (11, 10, 'K', 11);
INSERT INTO tree_node VALUES (12, 10, 'L', 12);
INSERT INTO tree_node VALUES (13, 11, 'M', 13);
INSERT INTO tree_node VALUES (14, 11, 'N', 14);
get all the children ids by parent id
-- Collect the ids of all descendants of rootid, level by level.
DROP FUNCTION IF EXISTS getTreeChild;
delimiter ;;
CREATE FUNCTION getTreeChild(rootid INT) RETURNS VARCHAR(5000)
COMMENT '树形节点ID'
READS SQL DATA
BEGIN
    -- Buffers sized to the declared return type; the original VARCHAR(200)
    -- silently truncated the result on trees with many nodes.
    DECLARE sTemp VARCHAR(5000);
    DECLARE sTempChd VARCHAR(5000);
    SET sTemp = '$';
    SET sTempChd = CAST(rootid AS CHAR);
    -- Each pass appends the current level and fetches the next one;
    -- GROUP_CONCAT returns NULL when no child matches, ending the loop.
    -- NOTE(review): GROUP_CONCAT is also capped by group_concat_max_len
    -- (default 1024); raise it for large trees.
    WHILE sTempChd IS NOT NULL DO
        SET sTemp = CONCAT(sTemp, ',', sTempChd);
        SELECT GROUP_CONCAT(id) INTO sTempChd
        FROM tree_node
        WHERE FIND_IN_SET(pid, sTempChd) > 0;
    END WHILE;
    RETURN sTemp;
END
;;
delimiter ;
-- -----------------------------------------
SELECT getTreeChild(0);
get tree order data
-- Recursively walk the tree depth-first, recording visit order in tmpLst.
-- pidin:  node id to record and expand; nDepth: its depth (root call passes 0).
-- NOTE(review): requires the tmpLst temporary table created by showChildLst,
-- and max_sp_recursion_depth > 0 so MySQL permits the recursive CALL.
-- NOTE(review): the COMMENT labels on createChildLst ('入口过程') and
-- showChildLst ('递归过程') appear swapped — this is the recursive one.
DROP PROCEDURE IF EXISTS `createChildLst`;
delimiter ;;
CREATE PROCEDURE `createChildLst`(IN pidin INT,IN nDepth INT)
COMMENT '入口过程'
BEGIN
DECLARE done INT DEFAULT 0;
DECLARE b INT;
-- Children of the current node, in display (sort) order.
DECLARE cur1 CURSOR FOR SELECT id FROM tree_node where pid=pidin order by sort;
-- Flips done to 1 once the cursor is exhausted.
DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = 1;
OPEN cur1;
-- Fetch the first child BEFORE inserting, so `done` doubles as an
-- is-leaf flag (1 = pidin has no children) for the row recorded below.
FETCH cur1 INTO b;
INSERT INTO tmpLst VALUES (NULL,pidin,nDepth,done);
WHILE done=0 DO
-- Depth-first: expand each child fully before moving to the next sibling.
CALL createChildLst(b,nDepth+1);
FETCH cur1 INTO b;
END WHILE;
CLOSE cur1;
END
;;
delimiter ;
-- ----------------------------
-- Procedure structure for `showChildLst`
-- ----------------------------
DROP PROCEDURE IF EXISTS `showChildLst`;
delimiter ;;
-- Driver: builds tmpLst via the recursive createChildLst, then returns
-- the subtree rooted at pid in depth-first visit order.
CREATE PROCEDURE `showChildLst`(IN pid INT)
COMMENT '递归过程'
BEGIN
-- Temp table is session-scoped; sno (auto-increment) preserves visit order.
CREATE TEMPORARY TABLE IF NOT EXISTS tmpLst(sno int primary key auto_increment,id int,depth int,isLeaf int);
-- Intentional unfiltered DELETE: clears leftovers from a previous call
-- in the same session (the temp table persists between calls).
DELETE FROM tmpLst;
CALL createChildLst(pid,0);
select tmpLst.*,tree_node.* from tmpLst,tree_node where tmpLst.id=tree_node.id order by tmpLst.sno;
END
;;
delimiter ;
-- ----------------------------------------
-- Stored procedure recursion is disabled by default (depth 0); raise the
-- limit so createChildLst may call itself.
set max_sp_recursion_depth=255;
call showChildLst(1);
-- Demo table: api_key is auto-filled with a UUID when not supplied.
create table app_users
(
    app_user_id smallint(6) not null auto_increment primary key,
    -- MySQL only accepts function defaults as parenthesized expressions
    -- (8.0.13+); a bare "default uuid()" is a syntax error. On older
    -- servers drop the DEFAULT and rely on the trigger below instead.
    api_key char(36) not null default (uuid())
);
use Trigger
-- Backfill the API key when the INSERT did not supply one.
-- The delimiter switch is required: the IF ... END IF body contains a ';'
-- that would otherwise make the client split the statement mid-trigger.
delimiter ;;
CREATE TRIGGER before_insert_app_users
BEFORE INSERT ON app_users
FOR EACH ROW
BEGIN
    IF NEW.api_key IS NULL THEN
        SET NEW.api_key = uuid();
    END IF;
END
;;
delimiter ;
You still have to update previously existing rows, like this:
-- Intentional full-table UPDATE (no WHERE): backfill api_key on rows
-- created before the trigger existed.
UPDATE app_users SET api_key = (SELECT uuid());
场景一:
用户角色关系, 用户表,角色表,用户角色表。 用户可以拥有多个角色。查询所有用户并附带出他们所属的角色。
场景二:
文章标签关系,文章表,标签表,文章标签关系表。查询所有文章并附带出他们所属的标签。
下面以场景一举例
创建用户表
-- User table; id is a CHAR(36) UUID primary key.
CREATE TABLE `bs_user` (
  `id` char(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL,
  `user_name` varchar(20) NOT NULL,
  `password` varchar(32) NOT NULL,
  `name` varchar(40) NOT NULL,
  `mail` varchar(40) DEFAULT NULL,
  `tel` varchar(11) DEFAULT NULL,
  `sex` varchar(1) DEFAULT NULL,
  `birthday` date DEFAULT NULL,
  `created_at` datetime NOT NULL,
  `modified_at` datetime NOT NULL,
  -- Soft-delete flag (0 = active), mirroring bs_role.is_del; the sample
  -- query later in this file filters on a.is_del and fails without it.
  `is_del` int(1) DEFAULT '0',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
创建角色表
-- Role table; role_id is a CHAR(36) UUID primary key.
CREATE TABLE `bs_role` (
`role_id` char(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL,
`role_name` varchar(10) NOT NULL,
`description` varchar(100) DEFAULT NULL,
-- Audit columns: who created / last modified the role, and when.
`creator_id` char(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL,
`created_at` datetime NOT NULL,
`modified_id` char(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL,
`modified_at` datetime DEFAULT CURRENT_TIMESTAMP,
-- Soft-delete flag: 0 = active, non-zero = deleted.
`is_del` int(1) DEFAULT '0',
PRIMARY KEY (`role_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
创建用户角色关系表
-- User/role junction table (many-to-many).
CREATE TABLE `bs_user_role` (
`user_id` char(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL,
`role_id` char(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL,
`created_at` datetime NOT NULL,
`modified_at` datetime NOT NULL,
-- Composite PK prevents duplicate assignments and serves user_id lookups.
PRIMARY KEY (`user_id`,`role_id`),
-- Secondary index for role -> users queries.
KEY `role_id` (`role_id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
分解
-- All (non-deleted) users with their roles flattened into CSV columns.
select a.id as user_id, a.user_name, a.name,
    group_concat(s.role_id) AS role_id,
    group_concat(s.role_name) AS role_name,
    group_concat(s.description) AS description
from bs_user a
left outer join (
    -- Resolve each user_id/role_id pair to the role's details.
    select b.user_id, b.role_id, c.role_name, c.description
    from bs_user_role b
    left join bs_role c on b.role_id = c.role_id
) AS s on s.user_id = a.id
-- NOTE(review): requires a soft-delete column is_del on bs_user (the
-- bs_user DDL in this file originally lacked it; bs_role has one).
where a.is_del = 0
-- Group by the primary key: the other bs_user columns are functionally
-- dependent on it, keeping the query valid under ONLY_FULL_GROUP_BY.
group by a.id
order by a.created_at desc
limit 0, 10;
结果
]]>assign->('data', $data)
这些也给后端做了。或者前端就丢给UI(美工)做,前端是不存在的。
[[back] ->[view]]-> [front end]
但是随着移动端的兴起,开始流行前后分离。
后端渲染模版的这一层就给省略掉了.
变成前后分离。其实所谓的后端渲染,也就是渲染首屏幕。
[back] ->-> [front and view]
但是又因为前后分离对搜索引擎爬虫不太友好。想当年,做网站的人都十分在意一个东西,就是Alexa排名,或者国内的百度排名,cnzz排名等等。毕竟那个时候风投的人都会看一下你的网站alexa是多少。其实这种指标就是反映了一个网站的活跃度。比如说,你说你的网站有几百万用户,但是别人查了一下这种排名,竟然几千万,搜索引擎收录几乎没有。这样做你认为会有信服力吗?
当然SEO需求大部分面向C端的。面向后台管理系统的当然就没有那么在乎了。
为了解决SPA bad SEO 问题。例如后台服务器拦截拦截特定爬虫进行SEO hack,批量提交Sitemap 等等的手段。简单的说,你想告诉搜索引擎链接,然后爬虫跳进来,进行相应的SEO hack 投食时渲染。
我们把这种形式叫做 前后分离 + 后端辅助(SPA hack回滚)。
但是随着node,babel的快速发展,浏览器快速迭代,前端工作天天调试兼容的时代过去了。变成了后端服务器 + 前端服务器这种架构。[back server] ->-> [[view-server]front server]。
前端的地位重要多了。同构这个概念于是被提出了。当时前端们就设想 后端渲染跟前端异步渲染可以重用。比如一个循环的列表,里面的item 模版可以被前端重用。到后来,随着 react vue 等框架开始流行,nextjs诞生了。这个阶段,主要是SPA的改进。node服务器把SEO需要的功能都做了。
既然node可以写server层,那么为什么不可以后端也用node呢。当然这用什么语言写后端,并不影响前端。
我们把这个形式称做,SPA + SSR前端渲染。
既然可以SSR前端渲染,为什么不可以后端渲染呢!当然服务器的权利就只剩下了 node。
又开始 后端的 Route -> Controller -> Model-> View.
这时候的 View 就变成了react 或者 vue. 其实就把当年的后端模版替换成了 前端框架模版,model输出的数据就变成 View的prop或者data 变量。
我们把这种形式称为 SSR后端渲染。
到头来,你会发现,走了一圈,又回到了原点。
其实本人不赞成javascript进行hash混淆处理,一是拖慢运行时速度,二是体积大。JS代码前端可获取,天生赋予“开源”属性,都可以在chrome devTools下查看。JS非压缩性混淆完全违反前端优化准则。
目前网络上可以搜索的JS混淆工具不外乎以下几种:
eval混淆,也是最早JS出现的混淆加密,据说第一天就被破解,修改一下代码,alert一下就可以破解了。这种方法从出生的那天就失去了意义。其实JS加密(混淆)是相对于可读性而言的,真正有意义的就是压缩型混淆uglify这一类,既可减小体积,也可降低可读性。
但是,也不能排除部分商业源代码使用hash类型混淆源代码,比如 miniui 使用的JSA加密, fundebug使用的javascript-obfuscator。
下面通过代码来说明 JSA加密 和 javascript-obfuscator 的区别:
要混淆的代码:
function logG(message) {
console.log('\x1b[32m%s\x1b[0m', message);
}
function logR(message) {
console.log('\x1b[41m%s\x1b[0m', message);
}
logG('logR');
logR('logG');
通过JSA加密混淆后生成的代码
function o00($){console.log("\x1b[32m%s\x1b[0m",$)}function o01($){console.log("\x1b[41m%s\x1b[0m",$)}o00("logR");o01("logG")
然后再beautifier一下:
function o00($) {
console.log("\x1b[32m%s\x1b[0m", $)
}
function o01($) {
console.log("\x1b[41m%s\x1b[0m", $)
}
o00("logR");
o01("logG")
可以发现,其实没有做什么什么修改,只是做了一些变量替换。想还原也比较简单的。这里就不拿它来做代表,也没有什么人用。
通过javascript-obfuscator混淆后生成的代码
var _0xd6ac=['[41m%s[0m','logG','log'];(function(_0x203a66,_0x6dd4f4){var _0x3c5c81=function(_0x4f427c){while(--_0x4f427c){_0x203a66['push'](_0x203a66['shift']());}};_0x3c5c81(++_0x6dd4f4);}(_0xd6ac,0x6e));var _0x5b26=function(_0x2d8f05,_0x4b81bb){_0x2d8f05=_0x2d8f05-0x0;var _0x4d74cb=_0xd6ac[_0x2d8f05];return _0x4d74cb;};function logG(_0x4f1daa){console[_0x5b26('0x0')]('[32m%s[0m',_0x4f1daa);}function logR(_0x38b325){console[_0x5b26('0x0')](_0x5b26('0x1'),_0x38b325);}logG('logR');logR(_0x5b26('0x2'));
再beautifier一下:
var _0xd6ac = ['[41m%s[0m', 'logG', 'log'];
(function(_0x203a66, _0x6dd4f4) {
var _0x3c5c81 = function(_0x4f427c) {
while (--_0x4f427c) {
_0x203a66['push'](_0x203a66['shift']());
}
};
_0x3c5c81(++_0x6dd4f4);
}(_0xd6ac, 0x6e));
var _0x5b26 = function(_0x2d8f05, _0x4b81bb) {
_0x2d8f05 = _0x2d8f05 - 0x0;
var _0x4d74cb = _0xd6ac[_0x2d8f05];
return _0x4d74cb;
};
function logG(_0x4f1daa) {
console[_0x5b26('0x0')]('[32m%s[0m', _0x4f1daa);
}
function logR(_0x38b325) {
console[_0x5b26('0x0')](_0x5b26('0x1'), _0x38b325);
}
logG('logR');
logR(_0x5b26('0x2'));
这个复杂得多,但是分析一下你会发现,其实多了一个字典,所有方法变量,都有可能存在字典中,调用时先调用字典还原方法名变量再执行。
其实入口都是变量的规则。
字典函数:
var _0xd6ac = ['[41m%s[0m', 'logG', 'log'];
(function(_0x203a66, _0x6dd4f4) {
var _0x3c5c81 = function(_0x4f427c) {
while (--_0x4f427c) {
_0x203a66['push'](_0x203a66['shift']());
}
};
_0x3c5c81(++_0x6dd4f4);
}(_0xd6ac, 0x6e));
var _0x5b26 = function(_0x2d8f05, _0x4b81bb) {
_0x2d8f05 = _0x2d8f05 - 0x0;
var _0x4d74cb = _0xd6ac[_0x2d8f05];
return _0x4d74cb;
};
通过以上发现,我们可以把JS混淆归结为三类,分别是 eval类型,hash类型,压缩类型。而压缩类型,是目前前端性能优化的常用工具,以uglify为代表。
常用的前端压缩优化工具:
JavaScript:
CSS:
HTML:
从工具流(workflow) 来看,不论是 webpack 还是 gulp ,目前javascript最流行工具还是uglify。
相应的解混淆工具:
eval对应的解混淆工具, 随便百度都可以搜索到,如jspacker
JSA对应的解混淆工具unjsa
javascript-obfuscator对应的解混淆工具crack.js
压缩类型uglify对应的工具UnuglifyJS,在线版jsnice
解混淆策略其实是依据生成代码规律编写,不外乎观察特征分析,再观察特征分析,不断调整。都是手板眼见功夫。
都没有什么难度可言,有的就是耐性。比如javascript-obfuscator对应的解混淆工具可以
分解为N因子问题:
如何查询function的作用域?
预执行变量替换可能存在类型?
…
如:
var _0xd6ac = ['[41m%s[0m', 'logG', 'log'];
(function(_0x203a66, _0x6dd4f4) {
var _0x3c5c81 = function(_0x4f427c) {
while (--_0x4f427c) {
_0x203a66['push'](_0x203a66['shift']());
}
};
_0x3c5c81(++_0x6dd4f4);
}(_0xd6ac, 0x6e));
var _0x5b26 = function(_0x2d8f05, _0x4b81bb) {
_0x2d8f05 = _0x2d8f05 - 0x0;
var _0x4d74cb = _0xd6ac[_0x2d8f05];
return _0x4d74cb;
};
function logG(_0x4f1daa) {
console[_0x5b26('0x0')]('[32m%s[0m', _0x4f1daa);
}
function logR(_0x38b325) {
console[_0x5b26('0x0')](_0x5b26('0x1'), _0x38b325);
}
logG('logR');
logR(_0x5b26('0x2'));
要还原成
function logG(message) {
console.log('\x1b[32m%s\x1b[0m', message);
}
function logR(message) {
console.log('\x1b[41m%s\x1b[0m', message);
}
logG('logR');
logR('logG');
第一步你总得知道字典函数,然后执行字典函数 _0x5b26('0x0')
还原成 log
.
那么就好办了,写代码的事。
如 https://github.com/jscck/crack.js/blob/master/crack.js
还原后,如何重构代码,那么你还得知道代码生成之前是通过什么工具打包的webpack? 还是?
如webpack 的各种封装头和尾
https://webpack.js.org/configuration/output/#expose-a-variable
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else if(typeof exports === 'object')
exports['MyLibrary'] = factory();
else
root['MyLibrary'] = factory();
})(typeof self !== 'undefined' ? self : this, function() {
return _entry_return_;
});
假如再深入一点,可能会涉及到JS语法解释器, AST抽象语法树
目前涉及到 JS语法解释器, AST抽象语法树的功能如下:
或者可以阅读《编程语言实现模式》,涉及到 antlr4。
当然也可以通过esprima等工具来做解混淆,只是工作量大一点,值不值的问题。
对于未来,JS商业源码加密的方向可能webassembly,先在服务端编译成wasm,源码就能真正的闭源。
有人的地方就有路,有混淆的地方就有解混淆,目前机器学习编程响应的解混淆工具也做的相当出色,比如
Machine Learning for Programming 产品
nice2predict,jsnice …
查看 https://www.sri.inf.ethz.ch/research/plml
为什么额外说一下AST抽象语法树,因为你可以 input-> ast -> output Anything。
比如你jsx转换小程序模版语法,这样你就可以用react语法来写小程序,如Taro。
mpvue, wepy, postcss …… 这些都是通过AST进行构建转换的工具,es6 -> es5, babel 都是使用AST。
AST抽象语法树大致流程:
Input 生成 AST tree
然后通过AST类型断言进行相应的转换
http://esprima.org/demo/parse.html
小程序
https://github.com/qwerty472123/wxappUnpacker
推荐.Net、C# 逆向反编译四大工具利器
https://www.cnblogs.com/ldc218/p/8945892.html
2018年支持java8的Java反编译工具汇总
https://blog.csdn.net/yannqi/article/details/80847354
预计阅读时间:10分钟
在此页面上,您将构建一个在Docker Compose上运行的简单Python Web应用程序。该应用程序使用Flask框架并在Redis中维护一个命中计数器。虽然该示例使用Python,但即使您不熟悉它,此处演示的概念也应该是可以理解的。
确保您已经安装了 Docker Engine 和 Docker Compose. 您不需要安装Python或Redis,因为两者都是由Docker镜像提供的。
定义应用程序依赖(dependencies).
为项目创建一个目录:
$ mkdir composetest
$ cd composetest
在项目目录中创建一个名为app.py
的文件并粘贴如下:
import time
import redis
from flask import Flask

app = Flask(__name__)
# 'redis' resolves to the redis container on the Compose network.
cache = redis.Redis(host='redis', port=6379)


def get_hit_count():
    """Increment and return the Redis hit counter.

    Retries a handful of times on connection errors so the app survives
    Redis starting up late or restarting; the last failure is re-raised.
    """
    attempts_left = 5
    while True:
        try:
            return cache.incr('hits')
        except redis.exceptions.ConnectionError as exc:
            if attempts_left == 0:
                raise exc
            attempts_left -= 1
            time.sleep(0.5)


@app.route('/')
def hello():
    """Serve the greeting with the current hit count."""
    count = get_hit_count()
    return 'Hello World! I have been seen {} times.\n'.format(count)


if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
In this example, redis
is the hostname of the redis container on the application’s network. We use the default port for Redis, 6379
.
Handling transient errors
Note the way the
get_hit_count
function is written. This basic retry loop lets us attempt our request multiple times if the redis service is not available. This is useful at startup while the application comes online, but also makes our application more resilient if the Redis service needs to be restarted anytime during the app’s lifetime. In a cluster, this also helps handling momentary connection drops between nodes.
在项目目录中创建另一个名为requirements.txt
的文件并粘贴如下:
flask
redis
在此步骤中,您将编写一个构建包含Python应用程序所需的所有依赖项,包括Python本身的Docker镜像的Dockerfile。
在项目目录中,创建一个名为Dockerfile
的文件并粘贴以下内容:
# Base image: Python 3.4 on Alpine Linux.
FROM python:3.4-alpine
# Copy the project into the image. NOTE(review): COPY is preferred over
# ADD for plain local files (ADD also unpacks archives and fetches URLs).
ADD . /code
# Run subsequent commands (and the CMD) from /code.
WORKDIR /code
# Install the Python dependencies.
RUN pip install -r requirements.txt
# Default container command: start the Flask app.
CMD ["python", "app.py"]
这里告诉Docker要做什么:
.
into the path /code
in the image./code
.python app.py
.更多关于如何编写Dockerfiles的信息,请查看 Docker user guide 和 Dockerfile reference.
在项目目录中创建一个名为docker-compose.yml
的文件并粘贴以下内容:
# Compose file format version 3.
version: '3'
services:
  # Flask app built from the Dockerfile in this directory.
  web:
    build: .
    ports:
      # host:container — expose Flask's port 5000.
      - "5000:5000"
  # Redis backing store, pulled from the public image on Docker Hub.
  redis:
    image: "redis:alpine"
这Compose文件定义了两个服务, web
and redis
. The web
service:
Dockerfile
in the current directory.5000
.The redis
service uses a public Redis image pulled from the Docker Hub registry.
docker-compose up
启动应用程序.$ docker-compose up
Creating network "composetest_default" with the default driver
Creating composetest_web_1 ...
Creating composetest_redis_1 ...
Creating composetest_web_1
Creating composetest_redis_1 ... done
Attaching to composetest_web_1, composetest_redis_1
web_1 | * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
redis_1 | 1:C 17 Aug 22:11:10.480 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
redis_1 | 1:C 17 Aug 22:11:10.480 # Redis version=4.0.1, bits=64, commit=00000000, modified=0, pid=1, just started
redis_1 | 1:C 17 Aug 22:11:10.480 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
web_1 | * Restarting with stat
redis_1 | 1:M 17 Aug 22:11:10.483 * Running mode=standalone, port=6379.
redis_1 | 1:M 17 Aug 22:11:10.483 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
web_1 | * Debugger is active!
redis_1 | 1:M 17 Aug 22:11:10.483 # Server initialized
redis_1 | 1:M 17 Aug 22:11:10.483 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
web_1 | * Debugger PIN: 330-787-903
redis_1 | 1:M 17 Aug 22:11:10.483 * Ready to accept connections
Compose拉取Redis镜像,为您的代码构建镜像,并启动您定义的服务。 在这种情况下,代码在构建时静态复制到映像中。
在浏览器输入 http://0.0.0.0:5000/
查看应用运行情况.
If you’re using Docker natively on Linux, Docker for Mac, or Docker for Windows, then the web app should now be listening on port 5000 on your Docker daemon host. Point your web browser to http://localhost:5000
to find the Hello World
message. If this doesn’t resolve, you can also try http://0.0.0.0:5000
.
If you’re using Docker Machine on a Mac or Windows, use docker-machine ip MACHINE_VM
to get the IP address of your Docker host. Then, open http://MACHINE_VM_IP:5000
in a browser.
You should see a message in your browser saying:
Hello World! I have been seen 1 times.
刷新网页.
The number should increment.
Hello World! I have been seen 2 times.
切换到另一个终端窗口,然后键入docker image ls
列出本地镜像.
Listing images at this point should return redis
and web
.
$ docker image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
composetest_web latest e2c21aa48cc1 4 minutes ago 93.8MB
python 3.4-alpine 84e6077c7ab6 7 days ago 82.5MB
redis alpine 9d8fa9aa0e5b 3 weeks ago 27.5MB
You can inspect images with docker inspect <tag or id>
.
通过在另一个终端在项目目录中运行docker-compose down
, 或者在启动应用程序的终端中按CTRL + C 来停止应用程序.
编辑项目目录中的docker-compose.yml
为web
服务添加 bind mount :
# Compose file format version 3.
version: '3'
services:
  web:
    build: .
    ports:
      - "5000:5000"
    # Bind-mount the project directory into /code so code edits on the
    # host are visible in the container without rebuilding the image.
    volumes:
      - .:/code
  redis:
    image: "redis:alpine"
The new volumes
key 把项目目录(当前目录)挂载到容器内的/ code
允许您动态修改代码,而无需重建映像
在项目目录中,键入docker-compose up
以使用更新的Compose文件构建应用程序,然后运行它。
$ docker-compose up
Creating network "composetest_default" with the default driver
Creating composetest_web_1 ...
Creating composetest_redis_1 ...
Creating composetest_web_1
Creating composetest_redis_1 ... done
Attaching to composetest_web_1, composetest_redis_1
web_1 | * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
...
Check the Hello World
message in a web browser again, and refresh to see the count increment.
Shared folders, volumes, and bind mounts
If your project is outside of the
Users
directory (cd ~
), then you need to share the drive or location of the Dockerfile and volume you are using. If you get runtime errors indicating an application file is not found, a volume mount is denied, or a service cannot start, try enabling file or drive sharing. Volume mounting requires shared drives for projects that live outside ofC:\Users
(Windows) or/Users
(Mac), and is required for any project on Docker for Windows that uses Linux containers. For more information, see Shared Drives on Docker for Windows, File sharing on Docker for Mac, and the general examples on how to Manage data in containers.If you are using Oracle VirtualBox on an older Windows OS, you might encounter an issue with shared folders as described in this VB trouble ticket. Newer Windows systems meet the requirements for Docker for Windows and do not need VirtualBox.
由于应用程序代码现在使用卷安装到容器中,因此您可以更改其代码并立即查看更改,而无需重建映像.
Change the greeting in app.py
and save it. For example, change the Hello World!
message to Hello from Docker!
:
return 'Hello from Docker! I have been seen {} times.\n'.format(count)
Refresh the app in your browser. The greeting should be updated, and the counter should still be incrementing.
如果你想在后台运行你的服务, 你可以在docker-compose up
命令后面添加 -d
(for “detached” mode),使用docker-compose ps
查看当前正在运行的内容:
$ docker-compose up -d
Starting composetest_redis_1...
Starting composetest_web_1...
$ docker-compose ps
Name Command State Ports
-------------------------------------------------------------------
composetest_redis_1 /usr/local/bin/run Up
composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp
docker-compose run
命令允许您为服务运行一次性命令。例如,查看web
服务可用的环境变量:
$ docker-compose run web env
可以通过docker-compose --help
查看更多其他可用命令 . 您还可以为bash和zsh shell安装 command completion,以查看可用的命令.
如果您使用docker-compose up -d
启动Compose,请在完成后停止服务:
$ docker-compose stop
您可以使用down
命令将所有内容放下,完全删除容器. 传递--volumes
也可以删除Redis容器使用的数据挂载:
$ docker-compose down --volumes
到这里, 您已经了解了Compose如何工作的基础知识.
To learn more about volumes and bind mounts, see Manage data in Docker
documentation, docs, docker, compose, orchestration, containers
Terry edit on markdown编辑
回归正题吧!
你只会答:建立请求,接收数据,dom渲染等。那么百度一下别人是怎么写?
其实也差不多只是分解的很细,按我的理解说一下吧
1) 如何建立请求?引申出来的HTTP协议的理解了?
那么HTTP协议如何工作?
一个基本HTTP系统有什么组件构成?
过程是怎么样的?
[client/request] –[Proxy] – [Proxy] – [Proxy]– [Server/response]
其中Proxies中可以进行如下操作:
HTTP大致的流程:
1、Open a TCP connection (建立TCP链接)
2、Send an HTTP message (发送HTTP消息)
3、Read the response sent by the server (读取服务器响应信息)
4、Close or reuse the connection for further requests.(关闭或者重用链接)
https://docs.w3cub.com/http/overview/
2)如何渲染页面的?
任何浏览器都应该有的7大组件
涉及到的要点:DOM Tree, Html Parser, Css Parser, Layout, Painting, Css Box Modal(盒子模型), Positioning 等等,很长不一一说,可以阅读下面链接。
https://www.html5rocks.com/en/tutorials/internals/howbrowserswork/
你只会答:你指的是setTimeout,作用域,执行机制之类的吗?
其实想问的是浏览器单线程是如何工作的? setTimeout在当前作用域最后调用只是一个子集
alert 和 synchronous XHR 会堵塞浏览器
eventLoop 大致的执行顺序
while (eventLoop.waitForTask()) {
const taskQueue = eventLoop.selectTaskQueue()
if (taskQueue.hasNextTask()) {
taskQueue.processNextTask()
}
const microtaskQueue = eventLoop.microTaskQueue
while (microtaskQueue.hasNextMicrotask()) {
microtaskQueue.processNextMicrotask()
}
if (shouldRender()) {
applyScrollResizeAndCSS()
runAnimationFrames()
render()
}
}
https://developer.mozilla.org/en-US/docs/Web/JavaScript/EventLoop
https://blog.risingstack.com/writing-a-javascript-framework-execution-timing-beyond-settimeout/
过程:
跟上一次 vircual dom diff 后生成语法树, 然后patch回去 native dom.
状态机 -> virtual dom(template) diff -> 生成 patch -> native dom.
Vue virtual DOM patching 算法是基于 https://github.com/snabbdom/snabbdom
diff 算法大致 O(n3) -> O(n) 引入两个前提条件:elements Types 和 collection key
React:
https://reactjs.org/docs/reconciliation.html
Vuex
Redux
Dva
Redux-saga
-完-
https://www.nginx.com/resources/library/complete-nginx-cookbook/
正题
我们都清楚 O’Reilly Cookbook 类型的书籍的风格,主要行文风格都是提出问题,给出答案并解决问题。
这本说主要分为三部分讲解:
最常用到的当然是第一章,负载均衡;十一章,访问控制,如何设置跨域;十三章,https配置等。
相对于开发者而言,我们更多的只需要了解第一部分。第三部分对于开发者没有什么必要,所以就没有记录,有需要的可以自行阅读。
1.1 HTTP Load Balancing(http负载均衡)
upstream backend {
server 10.10.12.45:80 weight=1;
server app.example.com:80 weight=2;
}
server {
location / {
proxy_pass http://backend;
}
}
你把当前请求负载到多个server上,同时server可以指定权重(weight)。
更多配置可以访问 https://docs.w3cub.com/nginx/stream/ngx_stream_upstream_module/#upstream
1.2 TCP Load Balancing (TCP负载均衡)
stream {
upstream mysql_read {
server read1.example.com:3306 weight=5;
server read2.example.com:3306;
server 10.10.12.34:3306 backup;
}
server {
listen 3306;
proxy_pass mysql_read;
}
}
1.3 Load-Balancing Methods (负载均衡方法)
The following load-balancing methods are available for upstream HTTP, TCP, and UDP pools:
五种方法(指令名称):
Round robin ( weight=x)
Least connections (least_conn)
Least time (least_time)
Generic hash (hash)
IP hash (ip_hash)
阅读: https://docs.w3cub.com/nginx/stream/ngx_stream_upstream_module/
1.4 Connection Limiting (连接数限制)
upstream backend {
zone backends 64k;
queue 750 timeout=30s;
server webserver1.example.com max_conns=25;
server webserver2.example.com max_conns=15;
}
2.1 Sticky Cookie (粘性Cookie)
You need to bind a downstream client to an upstream server
sticky cookie 指令
upstream backend {
server backend1.example.com;
server backend2.example.com;
sticky cookie
affinity
expires=1h
domain=.example.com
httponly
secure
path=/;
}
2.2 Sticky Learn
You need to bind a downstream client to an upstream server by using an existing cookie.
sticky learn 指令
upstream backend {
server backend1.example.com:8080;
server backend2.example.com:8081;
sticky learn
create=$upstream_cookie_cookiename
lookup=$cookie_cookiename
zone=client_sessions:2m;
}
2.3 Sticky Routing
提供一个映射修正处理
map $cookie_jsessionid $route_cookie {
~.+\.(?P<route>\w+)$ $route;
}
map $request_uri $route_uri {
~jsessionid=.+\.(?P<route>\w+)$ $route;
}
upstream backend {
server backend1.example.com route=a;
server backend2.example.com route=b;
sticky route $route_cookie $route_uri;
}
2.4 Connection Draining
You need to gracefully remove servers for maintenance or other reasons while still serving sessions.
curl 'http://localhost/upstream_conf?upstream=backend&id=1&drain=1'
5.1 Caching Zones (缓存区)
You need to cache content and need to define where the cache is stored.
Use the proxy_cache_path directive to define shared memory cache
zones and a location for the content:
proxy_cache_path /var/nginx/cache
keys_zone=CACHE:60m
levels=1:2
inactive=3h
max_size=20g;
proxy_cache CACHE;
5.2 Caching Hash Keys
You need to control how your content is cached and looked up.
Use the proxy_cache_key directive, along with variables to define
what constitutes a cache hit or miss:
proxy_cache_key "$host$request_uri $cookie_user";
5.3 Cache Bypass
Use the proxy_cache_bypass directive with a nonempty or nonzero
value. One way to do this is by setting a variable within location
blocks that you do not want cached to equal 1:
proxy_cache_bypass $http_cache_bypass;
The configuration tells NGINX to bypass the cache if the HTTP
request header named cache_bypass is set to any value that is not 0.
5.4 Cache Performance (性能)
location ~* \.(css|js)$ {
expires 1y;
add_header Cache-Control "public";
}
5.5 Purging ()
map $request_method $purge_method {
PURGE 1;
default 0;
}
server {
...
location / {
...
proxy_cache_purge $purge_method;
}
}
Part II: Security and Access(安全和访问)
11.1 Access Based on IP Address
根据客户端IP设定访问权限
location /admin/ {
deny 10.0.0.1;
allow 10.0.0.0/20;
allow 2001:0db8::/32;
deny all;
}
11.2 Allowing Cross-Origin Resource Sharing
跨域资源共享, 这里的OPTIONS 处理可以参考
map $request_method $cors_method {
OPTIONS 11;
GET 1;
POST 1;
default 0;
}
server {
...
location / {
if ($cors_method ~ '1') {
add_header 'Access-Control-Allow-Methods'
'GET,POST,OPTIONS';
add_header 'Access-Control-Allow-Origin'
'*.example.com';
add_header 'Access-Control-Allow-Headers'
'DNT,
Keep-Alive,
User-Agent,
If-Modified-Since,
Cache-Control,
Content-Type';
}
if ($cors_method = '11') {
add_header 'Access-Control-Max-Age' 1728000;
add_header 'Content-Type' 'text/plain; charset=UTF-8';
add_header 'Content-Length' 0;
return 204;
}
}
}
12.1 Limiting Connections
限制连接数
You need to limit the number of connections based on a predefined key
, such as the client’s IP address.
limit_conn 指令
http {
limit_conn_zone $binary_remote_addr zone=limitbyaddr:10m;
limit_conn_status 429;
...
server {
...
limit_conn limitbyaddr 40;
...
}
}
This configuration creates a shared memory zone named limit
byaddr. The predefined key used is the client’s IP address in binary
form. The size of the shared memory zone is set to 10 mega‐
bytes. The limit_conn directive takes two parameters: a
limit_conn_zone name, and the number of connections allowed.
The limit_conn_status sets the response when the connections are
limited to a status of 429, indicating too many
requests. The limit_conn and limit_conn_status directives are
valid in the HTTP, server, and location context.
12.2 Limiting Rate
频率
http {
limit_req_zone $binary_remote_addr
zone=limitbyaddr:10m rate=1r/s;
limit_req_status 429;
...
server {
...
limit_req zone=limitbyaddr burst=10 nodelay;
...
}
}
12.3 Limiting Bandwidth
带宽限制, 自动降速
location /download/ {
limit_rate_after 10m;
limit_rate 1m;
}
13.1 Client-Side Encryption
You need to encrypt traffic between your NGINX server and the client.
加密传输,SSL modules such as the ngx_http_ssl_module
or ngx_stream_ssl_module
http { # All directives used below are also valid in stream
server {
listen 8433 ssl;
ssl_protocols TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_certificate /usr/local/nginx/conf/cert.pem;
ssl_certificate_key /usr/local/nginx/conf/cert.key;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
}
}
更新阅读
https://docs.w3cub.com/nginx/http/ngx_http_ssl_module/#example
https://docs.w3cub.com/nginx/stream/ngx_stream_ssl_module/#example
13.2 Upstream Encryption
You need to encrypt traffic between NGINX and the upstream service
and set specific negotiation rules for compliance regulations
or if the upstream is outside of your secured network.
location / {
proxy_pass https://upstream.example.com;
proxy_ssl_verify on;
proxy_ssl_verify_depth 2;
proxy_ssl_protocols TLSv1.2;
}
分为两个步骤,创建密码文件, 设定nginx 配置
14.1 Creating a User File
文件格式
# comment
name1:password1
name2:password2:comment
name3:password3
或者通过以下命令(先安装openssl)
openssl passwd MyPassword1234
14.2 Using Basic Authentication
location / {
auth_basic "Private site";
auth_basic_user_file conf.d/passwd;
}
15.1 Authentication Subrequests
Use the http_auth_request_module to make a request to the
authentication service to verify identity before serving the request:
location /private/ {
auth_request /auth;
auth_request_set $auth_status $upstream_status;
}
location = /auth {
internal;
proxy_pass http://auth-server;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Original-URI $request_uri;
}
16.1 Securing a Location
Use the secure link module and the secure_link_secret directive
to restrict access to resources to users who have a secure link:
location /resources {
secure_link_secret mySecret;
if ($secure_link = "") { return 403; }
rewrite ^ /secured/$secure_link;
}
location /secured/ {
internal;
root /var/www;
}
16.2 Generating a Secure Link with a Secret (生成一个安全链接)
You need to generate a secure link from your application
using a
secret.
The secure link module
in NGINX accepts the hex digest of an md5
hashed string, where the string is a concatenation of the URI path
and the secret.
echo -n 'index.htmlmySecret' | openssl md5 -hex
(stdin)= a53bee08a4bf0bbea978ddf736363a12
Python示例
import hashlib
hashlib.md5(b'index.htmlmySecret').hexdigest()
# 'a53bee08a4bf0bbea978ddf736363a12'
Now that we have this hash digest, we can use it in a URL. Our
example will be for www.example.com making a request for the
file /var/www/secured/index.html
through our /resources location.
Our full URL will be the following:
www.example.com/resources/a53bee08a4bf0bbea978ddf736363a12/\
index.html
16.3 Securing a Location with an Expire Date
location /resources {
root /var/www;
secure_link $arg_md5,$arg_expires;
secure_link_md5 "$secure_link_expires$uri$remote_addr
mySecret";
if ($secure_link = "") { return 403; }
if ($secure_link = "0") { return 410; }
}
secure_link directive 有两个参数,第一个参数是保存md5哈希的变量;第二个参数是保存链接的到期时间(Unix epoch time format)
16.4 Generating an Expiring Link
创建一个时间戳(Unix epoch time format)
date -d "2020-12-31 00:00" +%s --utc
# 1609372800
Next you’ll need to concatenate your hash string to match the string
configured with the secure_link_md5 directive. In this case, our
string to be used will be 1293771600/resources/
index.html127.0.0.1 mySecret. The md5 hash is a bit different
than just a hex digest. It’s an md5 hash in binary format, base64 enco‐
ded, with plus signs (+) translated to hyphens (-), slashes (/) trans‐
lated to underscores (_), and equal (=) signs removed. The following
is an example on a Unix system:
echo -n '1609372800/resources/index.html127.0.0.1 mySecret' \
| openssl md5 -binary \
| openssl base64 \
| tr +/ -_ \
| tr -d =
# TG6ck3OpAttQ1d7jW3JOcw
Now that we have our hash, we can use it as an argument along with
the expire date:
/resources/index.html?md5=TG6ck3OpAttQ1d7jW3JOcw&expires=1609372800’
from datetime import datetime, timedelta, timezone
from base64 import b64encode
import hashlib

# Inputs that nginx's secure_link_md5 string is built from.
resource = b'/resources/index.html'
remote_addr = b'127.0.0.1'
host = b'www.example.com'
mysecret = b'mySecret'

# Expiry one hour from now as Unix epoch seconds.
# timestamp() on an aware UTC datetime is portable; the original
# strftime('%s') is a non-standard directive that also misreads a
# naive utcnow() value as local time.
now = datetime.now(timezone.utc)
expire_dt = now + timedelta(hours=1)
expire_epoch = str.encode(str(int(expire_dt.timestamp())))

# md5 the string in exactly the order the nginx config expects:
# "$secure_link_expires$uri$remote_addr mySecret" — note the literal
# space before the secret (the original concatenation dropped it).
uncoded = expire_epoch + resource + remote_addr + b' ' + mysecret
md5hashed = hashlib.md5(uncoded).digest()

# Base64 encode with nginx's URL-safe transform:
# '+' -> '-', '/' -> '_', padding '=' removed.
b64 = b64encode(md5hashed)
unpadded_b64url = b64.replace(b'+', b'-')\
                     .replace(b'/', b'_')\
                     .replace(b'=', b'')

# Build the final link. The expires argument is a second query parameter,
# so it must be joined with '&' (the original used a second '?').
linkformat = "{}{}?md5={}&expires={}"
securelink = linkformat.format(
    host.decode(),
    resource.decode(),
    unpadded_b64url.decode(),
    expire_epoch.decode()
)
print(securelink)
20.1 HTTPS Redirects
Use a rewrite to send all HTTP traffic to HTTPS:
server {
# Catch-all HTTP listener (IPv4 + IPv6): permanently redirect every
# request to HTTPS, preserving the original host and URI.
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
20.2 Redirecting to HTTPS Where SSL/TLS Is Terminated Before NGINX
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
# TLS is terminated upstream (load balancer / CDN), so the scheme the
# client actually used arrives in the X-Forwarded-Proto header; only
# redirect when that original scheme was plain http.
if ($http_x_forwarded_proto = 'http') {
return 301 https://$host$request_uri;
}
}
This configuration is very much like HTTPS redirects. However, in
this configuration we’re only redirecting if the header X-Forwarded-Proto
is equal to HTTP.
20.3 HTTP Strict Transport Security
You need to instruct browsers to never
send requests over HTTP
Use the HTTP Strict Transport Security (HSTS) enhancement by
setting the Strict-Transport-Security header:
add_header Strict-Transport-Security max-age=31536000;
This configuration sets the Strict-Transport-Security header to a
max age of a year. This will instruct the browser to always do an
internal redirect when HTTP requests are attempted to this domain,
so that all requests will be made over HTTPS
.
20.4 Satisfying Any Number of Security Methods
Use the satisfy directive
location / {
# "any": grant access when EITHER check passes — the client IP is in
# the allow-list, OR the client authenticates with HTTP basic auth.
satisfy any;
allow 192.168.1.0/24;
deny all;
auth_basic "closed site";
auth_basic_user_file conf/htpasswd;
}
本文: https://github.com/icai/nginxcooking
-EOF-
]]>以下链接, Google Developers Service Worker工作原理:
https://developers.google.com/web/fundamentals/primers/service-workers/lifecycle#updates
但是假如某一天你网站不需要 Service Worker,如何正确地卸载呢?
以下以 create-react-app 为例子:
那么你的服务器文件service-worker.js永远都在,假如用户之前访问了你的网站,并且用户不清缓存的话,cache就永远都在了,你怎么更新,用户还是访问旧的版本。
service-worker.js
'use strict';
// [relative URL, content hash] pairs emitted by sw-precache-webpack-plugin.
// The hash changes whenever a file's content changes, which is what makes
// a newly deployed service-worker.js re-cache that asset.
var precacheConfig = [
["/index.html","a16310808c31e9e89b8d72aa2ddb058c"],
["/plugin.dll.0cf858ac.js","7268282b6a4415b541c4658c1478febc"],
["/vendor.dll.830d2c27.js","097dfeec5dda4f277752cb36b5d548ee"]
];
// Cache name is scoped to the registration scope so multiple apps on the
// same origin do not share (and clobber) each other's caches.
var cacheName = 'sw-precache-v3-sw-precache-webpack-plugin-' + (self.registration ? self.registration.scope : '');
// Query parameters matching any of these regexes (analytics params) are
// ignored when looking a request up in the cache.
var ignoreUrlParametersMatching = [/^utm_/];
var addDirectoryIndex = function (originalUrl, index) {
  // Append the directory index (e.g. "index.html") when the URL's path
  // ends with a slash; any other URL is returned unchanged.
  var parsed = new URL(originalUrl);
  var endsWithSlash = parsed.pathname.slice(-1) === '/';
  if (endsWithSlash) {
    parsed.pathname += index;
  }
  return parsed.toString();
};
var cleanResponse = function (originalResponse) {
  // Responses that were not redirected can be cached as-is.
  if (!originalResponse.redirected) {
    return Promise.resolve(originalResponse);
  }
  // Firefox 50 and below lacks the Response.body stream, so fall back to
  // reading the whole body into memory as a Blob.
  var obtainBody = ('body' in originalResponse) ?
      Promise.resolve(originalResponse.body) :
      originalResponse.blob();
  return obtainBody.then(function (bodyContent) {
    // Re-wrap in a fresh Response (new Response() accepts either a stream
    // or a Blob), carrying over headers and status.
    return new Response(bodyContent, {
      headers: originalResponse.headers,
      status: originalResponse.status,
      statusText: originalResponse.statusText
    });
  });
};
var createCacheKey = function (originalUrl, paramName, paramValue,
  dontCacheBustUrlsMatching) {
  // Work on a copy so the caller's URL object stays untouched.
  var keyUrl = new URL(originalUrl);
  // Skip the cache-busting parameter when dontCacheBustUrlsMatching is set
  // and the path already matches it (e.g. filenames embedding a hash).
  var skipBusting = dontCacheBustUrlsMatching &&
      keyUrl.pathname.match(dontCacheBustUrlsMatching);
  if (!skipBusting) {
    var extraParam = encodeURIComponent(paramName) + '=' +
        encodeURIComponent(paramValue);
    keyUrl.search += (keyUrl.search ? '&' : '') + extraParam;
  }
  return keyUrl.toString();
};
var isPathWhitelisted = function (whitelist, absoluteUrlString) {
  // An empty whitelist means every URL is allowed.
  if (whitelist.length === 0) {
    return true;
  }
  // Otherwise the URL's path must match at least one whitelist regex.
  var pathname = new URL(absoluteUrlString).pathname;
  for (var i = 0; i < whitelist.length; i++) {
    if (pathname.match(whitelist[i])) {
      return true;
    }
  }
  return false;
};
var stripIgnoredUrlParameters = function (originalUrl,
  ignoreUrlParametersMatching) {
  var parsed = new URL(originalUrl);
  // Drop the fragment; see https://github.com/GoogleChrome/sw-precache/issues/290
  parsed.hash = '';
  var rawQuery = parsed.search.slice(1); // query string without the leading '?'
  var keptPairs = [];
  rawQuery.split('&').forEach(function (pair) {
    var key = pair.split('=')[0];
    // Keep the pair only when its key matches none of the ignore regexes.
    var isIgnored = ignoreUrlParametersMatching.some(function (ignoredRegex) {
      return ignoredRegex.test(key);
    });
    if (!isIgnored) {
      keptPairs.push(pair);
    }
  });
  parsed.search = keptPairs.join('&');
  return parsed.toString();
};
// Query-parameter name used to append the content hash as a cache-busting key.
var hashParamName = '_sw-precache';
// Map from each precached absolute URL to its cache key: the URL plus the
// hash parameter, unless the filename already embeds an 8-character hash
// (matched by /\.\w{8}\./), in which case the URL itself is the key.
var urlsToCacheKeys = new Map(
precacheConfig.map(function(item) {
var relativeUrl = item[0];
var hash = item[1];
var absoluteUrl = new URL(relativeUrl, self.location);
var cacheKey = createCacheKey(absoluteUrl, hashParamName, hash, /\.\w{8}\./);
return [absoluteUrl.toString(), cacheKey];
})
);
function setOfCachedUrls(cache) {
  // Resolve to a Set of the URLs of every request currently stored in
  // `cache`, for O(1) membership checks during install.
  return cache.keys().then(function (cachedRequests) {
    var urls = cachedRequests.map(function (cachedRequest) {
      return cachedRequest.url;
    });
    return new Set(urls);
  });
}
// Install phase: fetch and cache every precached asset whose cache key is
// not already present, then move straight to the active state.
self.addEventListener('install', function(event) {
event.waitUntil(
caches.open(cacheName).then(function(cache) {
return setOfCachedUrls(cache).then(function(cachedUrls) {
return Promise.all(
Array.from(urlsToCacheKeys.values()).map(function(cacheKey) {
// If we don't have a key matching url in the cache already, add it.
if (!cachedUrls.has(cacheKey)) {
var request = new Request(cacheKey, {credentials: 'same-origin'});
return fetch(request).then(function(response) {
// Bail out of installation unless we get back a 200 OK for
// every request.
if (!response.ok) {
throw new Error('Request for ' + cacheKey + ' returned a ' +
'response with status ' + response.status);
}
return cleanResponse(response).then(function(responseToCache) {
return cache.put(cacheKey, responseToCache);
});
});
}
})
);
});
}).then(function() {
// Force the SW to transition from installing -> active state
return self.skipWaiting();
})
);
});
// Activate phase: purge cache entries whose keys are no longer in the
// expected precache set (assets from a previous deployment), then take
// control of all open clients immediately.
self.addEventListener('activate', function(event) {
var setOfExpectedUrls = new Set(urlsToCacheKeys.values());
event.waitUntil(
caches.open(cacheName).then(function(cache) {
return cache.keys().then(function(existingRequests) {
return Promise.all(
existingRequests.map(function(existingRequest) {
// Delete any cached entry that is not part of the current precache.
if (!setOfExpectedUrls.has(existingRequest.url)) {
return cache.delete(existingRequest);
}
})
);
});
}).then(function() {
// Start controlling already-open pages without waiting for a reload.
return self.clients.claim();
})
);
});
// Fetch phase: answer GET requests from the precache when possible. The
// lookup tries, in order: the URL itself (ignored params and hash
// stripped), the URL with the directory index appended, and finally the
// navigate fallback for whitelisted navigation requests. Any cache error
// falls back to the network.
self.addEventListener('fetch', function(event) {
if (event.request.method === 'GET') {
// Should we call event.respondWith() inside this fetch event handler?
// This needs to be determined synchronously, which will give other fetch
// handlers a chance to handle the request if need be.
var shouldRespond;
// First, remove all the ignored parameters and hash fragment, and see if we
// have that URL in our cache. If so, great! shouldRespond will be true.
var url = stripIgnoredUrlParameters(event.request.url, ignoreUrlParametersMatching);
shouldRespond = urlsToCacheKeys.has(url);
// If shouldRespond is false, check again, this time with 'index.html'
// (or whatever the directoryIndex option is set to) at the end.
var directoryIndex = 'index.html';
if (!shouldRespond && directoryIndex) {
url = addDirectoryIndex(url, directoryIndex);
shouldRespond = urlsToCacheKeys.has(url);
}
// If shouldRespond is still false, check to see if this is a navigation
// request, and if so, whether the URL matches navigateFallbackWhitelist.
var navigateFallback = '/index.html';
if (!shouldRespond &&
navigateFallback &&
(event.request.mode === 'navigate') &&
isPathWhitelisted(["^(?!\\/__).*"], event.request.url)) {
url = new URL(navigateFallback, self.location).toString();
shouldRespond = urlsToCacheKeys.has(url);
}
// If shouldRespond was set to true at any point, then call
// event.respondWith(), using the appropriate cache key.
if (shouldRespond) {
event.respondWith(
caches.open(cacheName).then(function(cache) {
return cache.match(urlsToCacheKeys.get(url)).then(function(response) {
if (response) {
return response;
}
throw Error('The cached response that was expected is missing.');
});
}).catch(function(e) {
// Fall back to just fetch()ing the request if some unexpected error
// prevented the cached response from being valid.
console.warn('Couldn\'t serve response for "%s" from cache: %O', event.request.url, e);
return fetch(event.request);
})
);
}
}
});
index.js 入口文件
https://github.com/facebook/create-react-app/blob/next/packages/react-scripts/template/src/index.js
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
import App from './App';
import * as serviceWorker from './serviceWorker';
ReactDOM.render(<App />, document.getElementById('root'));
// If you want your app to work offline and load faster, you can change
// unregister() to register() below. Note this comes with some pitfalls.
// Learn more about service workers: http://bit.ly/CRA-PWA
serviceWorker.unregister();
//----vs-----
// The two calls above/below are mutually exclusive alternatives: either
// opt out of the service worker entirely (unregister), or register it with
// an onUpdate handler that refreshes the page once a new version is ready.
serviceWorker.register({
onUpdate: async (registration) => {
// Pull the freshest service-worker.js before prompting the user.
await registration.update();
message.info("网站更新完成, 请刷新页面: " + moment().format('YYYY-MM-DD HH:mm:ss'), 0.5, () => {
window.location.reload();
});
},
onSuccess: () => {}
});
假如你引入了serviceWorker文件, 并发布了,
https://github.com/facebook/create-react-app/blob/next/packages/react-scripts/template/src/serviceWorker.js
serviceWorker.register();
改成 serviceWorker.unregister();
但是同时千万要记住 要保留 sw-precache-webpack-plugin 去做webpack 构建(目的是为了生成新的service-worker.js,触发更新)。按照人的既定思维,既然不要了,那么当然要移除。
假如移除了 sw-precache-webpack-plugin, 你怎么 生成新版本的 service-worker.js,还有,没有新版本 service-worker.js 又怎么会更新你的代码了,这里似乎出现双重陷阱,但是当你理解了service-worker.js 生命周期原理后,一切都可以理解。
在入口加入:
serviceWorker.unregister();
service-worker.js 文件 依旧需要更新。
假如真的不想引入 sw-precache-webpack-plugin 做webpack构建的话,请把服务器上面的
service-worker.js precacheConfig
清空
var precacheConfig = [
];
https://github.com/facebook/create-react-app/blob/master/packages/react-scripts/template/README.md#opting-out-of-caching
If you would prefer not to enable service workers prior to your initial production deployment, then remove the call to registerServiceWorker() from src/index.js.
If you had previously enabled service workers in your production deployment and have decided that you would like to disable them for all your existing users, you can swap out the call to registerServiceWorker() in src/index.js first by modifying the service worker import:
import { unregister } from './registerServiceWorker';
and then call unregister() instead. After the user visits a page that has unregister(), the service worker will be uninstalled. Note that depending on how /service-worker.js is served, it may take up to 24 hours for the cache to be invalidated.
create-react-app 提示的测试服务器
对service-worker.js会有HTTP缓存,部署简单nginx 服务器进行测试
server {
    listen 8000;
    # listen somename:8080;
    # server_name somename alias another.alias;
    location / {
        # nginx expects forward slashes even for Windows paths; an unquoted
        # backslash sequence would be parsed as character escapes.
        root D:/yourproject/build;
        index index.html index.htm;
        # proxy_no_cache 1;
        add_header Cache-Control "no-cache";
        try_files $uri $uri/ /index.html;
    }
    # Prefix locations are matched literally, so regex escapes like "\-"
    # and "\." never match a real request path. Use an exact-match location
    # (or "location ~" for a regex) and give it a root so the file is
    # actually served from the build directory.
    location = /service-worker.js {
        root D:/yourproject/build;
        expires -1;
        add_header Pragma "no-cache";
    }
}
serviceWorker.register({
// Called when a new service worker has been installed and is waiting.
onUpdate: async (registration) => {
await registration.update(); // This line is important: fetch the newest service-worker.js before notifying.
message.info("网站更新完成, 请刷新页面: " + moment().format('YYYY-MM-DD HH:mm:ss'), 0.5, () => {
window.location.reload();
});
},
onSuccess: () => {}
});
https://lavas.baidu.com/guide/v2/advanced/service-worker#%E6%B3%A8%E5%86%8C-service-worker-%E6%89%A9%E5%B1%95
注册 Service Worker (扩展)
提示:这部分内容由 Lavas 内部处理,并不需要开发者进行参与,仅仅作为解答开发者疑问的扩展阅读存在。
Service Worker 编写完成后,还需要进行注册才能真正生效。常规的注册代码能够在各类 Service Worker 教程或文章中找到,但在实际项目中有一个不得不考虑的问题,使得我们必须对注册代码进行一些改动,那就是 Service Worker 更新 的问题。
https://github.com/lavas-project/sw-register-webpack-plugin
离线指南
https://developers.google.com/web/fundamentals/instant-and-offline/offline-cookbook/#cache-falling-back-to-network
假如熟悉Service Worker 缓存机制的话,那么为什么要卸载呢 ?
本文 : https://github.com/icai/icai.github.io/issues/1
-EOF-
]]>《Data Push Apps with HTML5 SSE》 读书摘要
<!doctype html>
<html>
<head>
<meta charset="UTF-8">
<title>Basic SSE Example</title>
<script src="//code.jquery.com/jquery-1.11.0.min.js"></script>
</head>
<body>
<pre id="x">Initializing...</pre>
<script>
// Open a Server-Sent Events stream; the browser keeps the connection open
// and fires a "message" event for each data frame pushed by the server.
var es = new EventSource("basic_sse.php");
es.addEventListener("message", function(e){
// Append each received line to the <pre> element.
$("#x").append("\n" + e.data);
},false);
</script>
</body>
</html>
var http = require("http"),
fs = require("fs");
// Port comes from the first CLI argument, defaulting to 1234.
var port = parseInt(process.argv[2] || 1234);
http.createServer(function(request, response) {
console.log("Client connected:" + request.url);
// Any URL other than /sse serves the demo HTML page, with its PHP
// endpoint reference rewritten to point at this server's /sse route.
if (request.url != "/sse") {
fs.readFile("basic_sse.html", function(err, file) {
response.writeHead(200, { 'Content-Type': 'text/html' });
var s = file.toString(); //file is a buffer
s = s.replace("basic_sse.php", "sse");
response.end(s);
});
return;
}
//Below is to handle SSE request. It never returns.
// text/event-stream plus one "data:...\n\n" frame per second keeps the
// EventSource connection open and pushes the current timestamp.
response.writeHead(200, { "Content-Type": "text/event-stream" });
var timer = setInterval(function() {
var content = "data:" + new Date().toISOString() + "\n\n";
// write() returns false when the chunk was buffered instead of flushed.
var b = response.write(content);
if (!b) console.log("Data got queued in memory (content=" + content + ")");
else console.log("Flushed! (content=" + content + ")");
}, 1000);
// Stop the interval when the client disconnects, or the timer would keep
// writing to a dead response forever.
request.connection.on("close", function() {
response.end();
clearInterval(timer);
console.log("Client closed connection. Aborting.");
});
}).listen(port);
console.log("Server running at http://localhost:" + port);
startEventSource()
startXHR()
startIframe()
~~startLongpoll()
~~(none)
~~a Technically since Firefox 6 and Chrome 6, but they have been auto-updating since Firefox 4, and Chrome since it came out
of beta, so you can reasonably expect no one is still using versions that do not support SSE.
~~
书本代码,书本代码,书本代码。
-EOF-
]]>Scrapy是一个基于Python的高级爬虫框架,只需要简单的配置就可以实现你大大的需求。
为什么要介绍Scrapy,内容时代了,没有内容再好看的网站,那又有什么用呢?
对不对?对对对。
学习scrapy有什么需求吗?
python是基于python语言的,那么自然你要懂一些python的基础啦!
还有简单的xpath语法或类似于jQuery选择器语法,一边用一边学也可以的。
建议在Linux或者Mac下进行学习,Windows下可能会遇到各种入门级的问题。例如安装过程中报错,缺少这个,那个的问题。
只需要准备一样东西,就是官方文档 https://docs.scrapy.org/en/latest/
可以一边看,一边实践。
pip install Scrapy
官方推荐你安装python虚拟环境进行 Scrapy,不装也没有问题! 虚拟环境会虚拟IP么? 不会,不是这一回事
pip install virtualenv
接着来来读一下官方教程:
https://docs.scrapy.org/en/latest/intro/tutorial.html
scrapy startproject 项目名字
A: 假如你想知道parse接着会干什么,parse里面能干什么,我觉得你去了解一下Scrapy的钩子函数,以及整体架构。
B: 假如你对yield语法不是很了解的,可以去理解一下yield语法(python基础)。
入门是很快的。
假如你感兴趣,接着你可能会提出一些问题:
比如,怎么在一个页面抓取多条?怎么去重?等等
为什么会介绍Scrapy?
其实Scrapy不单只是一个爬虫框架,而且是一个很好的Python入门学习项目,假设你怎么会Python。
-EOF-
]]>2016的计划完成了多少,小目标还是有的,天马行空的就别谈了。该写的写了,头脑发热能折腾到2,3点。
2016看了很多电视,看了很多时政新闻,把台湾大选全程直播都看了,你懂的。
2016年底离开了工作三年的TheCN(CourseNetworking),结束了”Changing the Way the World Learns”,也是我工作五年里面最长的一份工作。没事周五就溜回家,可能这也是在TheCN工作的特殊记忆。
2016年没有过多的留念与怀念,参加了几场婚礼,大部分都是中学同学。
翻开微博,看一看2016的自己都干了什么,都是都是时事之类的观点,微博并没有过多的私隐东西,再翻开QQ空间也没有记录太多特别的,平淡的2016,还是发生了什么特别的事情,我没有记录。回去参加同学同学的婚礼,同学说,怎么你还是跟以前一模一样,好像一点都没有变化。好像也是,除了瘦了几斤,好像真的没有多大变化。
再翻看Github看看,2013就想写的chrome插件,终于在2016写了,还有一堆基于公司需求的开源扩展。
回想起,这些年从事前端工作,头一两年,那些人还在炫耀IE6,IE7 bug,本质上是清零了。剩下的经验都是自己平时阅读开源代码积累的。kissy, ueditor, jQuery, Backbone, underscore, requriejs…… 感谢这些优秀的开源代码。
当别人问Angular,react,vue,gulp, webpack…… 你懂么的时候,还好我还没有掉队。 会点吧。
2016后端语言写的怎么样了,有用的都学了,都是一边做一边学。ruby,python, php …… 用记得,不用就忘记 ……
或许这是今年的目标,“今年要不要去拿个软件专利著作权,2017”,说白就是给自己一个学习的催化剂。
不知道能不能完成,但是当开始了,就意味着收获。与其说是2017年的目标,不如说是没有完成计划。
2016好像写博客的次数越来越少了,Greader阅读习惯也少了, 反而印象笔记的收藏多了。
2016过了, 一直想去旅游,可惜缺少游伴。辞职的时候,同事说,去个旅游先吧。我也想,去哪里好呢,想不到,没有游伴。
2016,2017能为你做的什么呢!急不了…… 每次都想勤点写blog,可惜总不能坚持,一是想不到想写的内容,二或许不能出口成文,文笔空洞,放弃了。
回到家里看见那本《网页设计三剑客8》,还有那一箱高高的电脑爱好者,还有那些年天天折腾Ghost的日子,这些年走来,真的不容易。以前同事说,这就叫做青春。
可能吧,可能这些叫青春。
2017给自己的生活作息定个目标吧,同学说都我瘦了,可能作息不规律吧。这几年看见了离别的离别,住院的住院。
给自己生活一个小目标吧:定期饮食,增肥,定期运动,定时睡眠。
不知不觉,真正工作已经步入了第六个年头了。
2017继续拥抱变化,拥抱生活吧。
]]>cozhihu是一款知乎皮肤chrome扩展应用。
多彩知乎十分适合geek知乎用户。随时随地刷知乎,保护眼睛两不误。
或者到官网 http://cozhihu.w3cub.com/进行安装。
Copyright (c) 2016 Terry Cai. Licensed under the MIT license.
开源地址:https://github.com/icai/cozhihu
刷知乎,防沉迷哦!
cozhihu is a chrome extension for zhihu skins.
If you like this, Give me star please:
]]>A few days ago, I rewrite the project css to sass, so I need to rename the css. So I brought out a question, how to “Rename multiple files shell”? To people, solving problems with search engine is the method of least time. I am no exception, I encountered the same problem, and find out the answer in stackoverflow https://stackoverflow.com/questions/6911301/rename-multiple-files-shell.
for file in linux_*.mp4 ; do mv "$file" "${file#linux_}" ; done
But this is not exactly my question, so I needed to modify this shell command to suit my situation. I also found this solution in the comments on the question: http://pubs.opengroup.org/onlinepubs/9699919799/. And you may want to know about POSIX.
After check the POSIX wiki, you know that. The Portable Operating System Interface (POSIX) is a family of standards specified by the IEEE Computer Society for maintaining compatibility between operating systems. POSIX defines the application programming interface (API), along with command line shells and utility interfaces, for software compatibility with variants of Unix and other operating systems.
But now, we should focus on solving my question. I review the stackoverflow question and the best answer.
Q: I have multiple files in a directory, example: linux_file1.mp4
, linux_file2.mp4
and so on. How do I move these files, using shell, so that the names are file1.mp4
, file2.mp4
and so on. I have about 30 files that I want to move to the new name.
A: for file in linux_*.mp4 ; do mv "$file" "${file#linux_}" ; done
So I guess ${file#linux_}
is remove linux_
from the filename. My guess turned out to be right — you can have a look at the documentation. I suggest that you read the documentation at least once. And for the syntax of the bash shell, you can type the bash help in your terminal.
~$>help
GNU bash,版本 4.3.11(1)-release (x86_64-pc-linux-gnu)
这些 shell 命令是内部定义的。请输入 `help' 以获取一个列表。
输入 `help 名称' 以得到有关函数`名称'的更多信息。
使用 `info bash' 来获得关于 shell 的更多一般性信息。
使用 `man -k' 或 `info' 来获取不在列表中的命令的更多信息。
名称旁边的星号(*)表示该命令被禁用。
job_spec [&] history [-c] [-d 偏移量] [n] 或 history >
(( 表达式 )) if 命令; then 命令; [ elif 命令; then 命令; >
. 文件名 [参数] jobs [-lnprs] [任务声明 ...] 或 jobs -x 命>
: kill [-s 信号声明 | -n 信号编号 | -信号声明] 进程号>
[ 参数... ] let 参数 [参数 ...]
[[ 表达式 ]] local [option] 名称[=值] ...
alias [-p] [名称[=值] ... ] logout [n]
bg [任务声明 ...] mapfile [-n 计数] [-O 起始序号] [-s 计数] [->
bind [-lpsvPSVX] [-m keymap] [-f file> popd [-n] [+N | -N]
break [n] printf [-v var] 格式 [参数]
builtin [shell 内建 [参数 ...]] pushd [-n] [+N | -N | 目录]
caller [表达式] pwd [-LP]
case 词 in [模式 [| 模式]...) 命令 ;;]... es> read [-ers] [-a 数组] [-d 分隔符] [-i 缓冲区>
cd [-L|[-P [-e]] [-@]] [dir] readarray [-n 计数] [-O 起始序号] [-s 计数] >
command [-pVv] 命令 [参数 ...] readonly [-aAf] [名称[=值] ...] 或 reado>
compgen [-abcdefgjksuv] [-o 选项] [-A > return [n]
complete [-abcdefgjksuv] [-pr] [-DE] > select NAME [in 词语 ... ;] do 命令; don>
compopt [-o|+o 选项] [-DE] [名称 ...] set [--abefhkmnptuvxBCHP] [-o 选项名] [>
continue [n] shift [n]
coproc [名称] 命令 [重定向] shopt [-pqsu] [-o] [选项名 ...]
declare [-aAfFgilnrtux] [-p] [name[=v> source 文件名 [参数]
dirs [-clpv] [+N] [-N] suspend [-f]
disown [-h] [-ar] [任务声明 ...] test [表达式]
echo [-neE] [参数 ...] time [-p] 管道
enable [-a] [-dnps] [-f 文件名] [名称 ...]> times
eval [参数 ...] trap [-lp] [[参数] 信号声明 ...]
exec [-cl] [-a 名称] [命令 [参数 ...]] [重定向> true
exit [n] type [-afptP] 名称 [名称 ...]
export [-fn] [名称[=值] ...] 或 export -p> typeset [-aAfFgilrtux] [-p] 名称[=值] .>
false ulimit [-SHabcdefilmnpqrstuvxT] [lim>
fc [-e 编辑器名] [-lnr] [起始] [终结] 或 fc -s> umask [-p] [-S] [模式]
fg [任务声明] unalias [-a] 名称 [名称 ...]
for 名称 [in 词语 ... ] ; do 命令; done unset [-f] [-v] [-n] [name ...]
for (( 表达式1; 表达式2; 表达式3 )); do 命令; do> until 命令; do 命令; done
function 名称 { 命令 ; } 或 name () { 命令 ;> variables - 一些 shell 变量的名称和含义
getopts 选项字符串 名称 [参数] wait [-n] [id ...]
hash [-lr] [-p 路径名] [-dt] [名称 ...] while 命令; do 命令; done
help [-dms] [模式 ...] { 命令 ; }
Now, I brought out my question :
Q: Rename the css suffix all current directory file .css
to .scss
and underscore the file prefix.
A: for file in *.css ; do mv "$file" "_${file%.css}.scss" ; done
About the syntax, you can have a look at the chapter 2.6.2 Parameter Expansion
.
…
${parameter%[word]}
Remove Smallest Suffix Pattern. The word shall be expanded to produce a pattern. The parameter expansion shall then result in parameter, with the smallest portion of the suffix matched by the pattern deleted. If present, word shall not begin with an unquoted ‘%’.
${parameter%%[word]}
Remove Largest Suffix Pattern. The word shall be expanded to produce a pattern. The parameter expansion shall then result in parameter, with the largest portion of the suffix matched by the pattern deleted.
${parameter#[word]}
Remove Smallest Prefix Pattern. The word shall be expanded to produce a pattern. The parameter expansion shall then result in parameter, with the smallest portion of the prefix matched by the pattern deleted. If present, word shall not begin with an unquoted ‘#’.
${parameter##[word]}
Remove Largest Prefix Pattern. The word shall be expanded to produce a pattern. The parameter expansion shall then result in parameter, with the largest portion of the prefix matched by the pattern deleted.
…
My question has been solved.
Good luck to you.
]]>As you know, Octopress 2.x is based on Jekyll 2.x. so “Upgrade from Octopress 2.x to Jekyll 3.x” mean that upgrading Jekyll 2.x to Jekyll 3.x.
Firstly, Check out and have a look the offcial upgrade turtuial https://jekyllrb.com/docs/upgrading/2-to-3/
Secondly, upgrade the Gemfile
file, gem 'pygments.rb'
, gem 'jekyll','~> 3.1.6'
, gem 'jekyll-sitemap'
,gem 'jekyll-paginate', '~> 1.1'
, etc. and than remove the Gemfile.lock
file and run command bundle install
. If in the gem install process, you catch out the error or some dependencies dependency confliction, upgrading it to the newest version could be ok.
And Then, Add gems: [jekyll-paginate]
in your _config.yml
file and remove Octopress-hooks
plugin. if you use the octopress_filters.rb
, you need to change the following Code:
# Jekyll 3 replacement for the removed octopress-hooks plugin: run the
# Octopress filters around rendering for both pages and posts.
# pre_filter runs before Liquid/Markdown conversion; post_render runs on
# the generated HTML.
Jekyll::Hooks.register :page, :pre_render do |page|
OctopressFilters::pre_filter(page)
end
Jekyll::Hooks.register :page, :post_render do |page|
OctopressFilters::post_render(page)
end
Jekyll::Hooks.register :post, :pre_render do |post|
OctopressFilters::pre_filter(post)
end
Jekyll::Hooks.register :post, :post_render do |post|
OctopressFilters::post_render(post)
end
And the sitemap_generator.rb
should be upgrade to the newest version.
The above problem is that I encountered during the upgrade process, only for reference. I suggest that when you upgrade the jekyll like me, you should use the the command jekyll build --trace
instead of rake preview
that you can catch out the error easily.
docs.w3cub project is based on open source<iframe class="github-btn" src="https://ghbtns.com/github-btn.html?user=Thibaut&repo=devdocs&type=star&count=true" allowtransparency="true" frameborder="0" scrolling="0" width="100" height="20"></iframe>. we use jekyll system to rewrite and hosted on Github Pages. This project continued for several months and written in my spare time.
As you know, all pages are static. we think that this would be more fast than using backend router. The most important thing is that Github Pages is free. we also thought about using frondend history router to rewrite this project, but Github Pages don’t support the nginx rule. However we have permission to use 404 page, but useing the 404 page as the frontend router is very crazy idea. So at last we use the jekyll system to generate the static pages.
After subsequent improvement, we will open some jekyll plug-ins and source code.
Following credits copy from devdocs.
Documentation | Copyright | License |
---|---|---|
Angular.js | © 2010-2015 Google, Inc. | CC BY |
Apache HTTP Server | © The Apache Software Foundation | Apache |
Backbone.js | © 2010-2015 Jeremy Ashkenas, DocumentCloud | MIT |
Bower | © 2015 Bower contributors | CC BY |
C C++ | © cppreference.com | CC BY-SA |
Chai | © 2011-2015 Jake Luer | MIT |
Clojure | © Rich Hickey | EPL |
CoffeeScript | © 2009-2015 Jeremy Ashkenas | MIT |
Cordova | © 2012-2015 The Apache Software Foundation | Apache |
CSS DOM HTML JavaScript SVG XPath | © 2005-2015 Mozilla Developer Network and individual contributors | CC BY-SA |
D3.js | © 2015 Michael Bostock | BSD |
Django | © Django Software Foundation and individual contributors | BSD |
Dojo | © 2005-2015 The Dojo Foundation | BSD + AFL |
Drupal | © 2001-2015 by the original authors Drupal is a registered trademark of Dries Buytaert. | GPLv2 |
Ember.js | © 2015 Yehuda Katz, Tom Dale and Ember.js contributors | MIT |
Elixir | © 2012 Plataformatec | Apache |
Express | © 2009-2015 TJ Holowaychuk | MIT |
Git | © 2005-2015 Linus Torvalds and others | GPLv2 |
Go | © Google, Inc. | CC BY |
Grunt | © 2014 Grunt Team | MIT |
Haskell | © The University of Glasgow | BSD |
io.js | © io.js contributors | MIT |
jQuery | © 2009 Packt Publishing © 2014 jQuery Foundation | MIT |
jQuery Mobile | © 2014 jQuery Foundation | MIT |
jQuery UI | © 2014 jQuery Foundation | MIT |
Knockout.js | © Steven Sanderson, the Knockout.js team, and other contributors | MIT |
Laravel | © Taylor Otwell | MIT |
Less | © 2009-2015 The Core Less Team | CC BY |
Lo-Dash | © 2012-2015 The Dojo Foundation | MIT |
Lua | © 1994–2015 Lua.org, PUC-Rio | MIT |
Marionette.js | © 2015 Muted Solutions, LLC | MIT |
Markdown | © 2004 John Gruber | BSD |
Meteor | © 2011-2015 Meteor Development Group | MIT |
Minitest | © Ryan Davis, seattle.rb | MIT |
Mocha | © 2011-2015 TJ Holowaychuk | MIT |
Modernizr | © 2009-2014 Modernizr | MIT |
Moment.js | © 2011-2015 Tim Wood, Iskren Chernev, Moment.js contributors | MIT |
Mongoose | © 2010 LearnBoost | MIT |
nginx | © 2002-2015 Igor Sysoev © 2011-2015 Nginx, Inc. | BSD |
Node.js | © Joyent, Inc. and other Node contributors Node.js is a trademark of Joyent, Inc. | MIT |
Nokogiri | © 2008-2014 2014 Aaron Patterson, Mike Dalessio, Charles Nutter, Sergio Arbeo, Patrick Mahoney, Yoko Harada, Akinori Musha | MIT |
npm | © npm, Inc. and Contributors npm is a trademark of npm, Inc. | npm |
OpenTSDB | © 2010-2015 The OpenTSDB Authors | LGPLv2.1 |
Phalcon | © 2011-2015 Phalcon Framework Team | CC BY |
Phaser | © 2015 Richard Davey, Photon Storm Ltd. | MIT |
Phoenix | © 2014 Chris McCord | MIT |
PHP | © 1997-2015 The PHP Documentation Group | CC BY |
PHPUnit | © 2005-2015 Sebastian Bergmann | CC BY |
PostgreSQL | © 1996-2013 The PostgreSQL Global Development Group © 1994 The Regents of the University of California | PostgreSQL |
Python | © 1990-2015 Python Software Foundation Python is a trademark of the Python Software Foundation. | PSFL |
Q | © 2009-2015 Kristopher Michael Kowal and contributors | MIT |
React, React Native, Flow, Relay | © 2013-2015 Facebook Inc. | CC BY |
Redis | © 2009-2015 Salvatore Sanfilippo | CC BY-SA |
RequireJS | © 2010-2014 The Dojo Foundation | MIT |
RethinkDB | © RethinkDB contributors | CC BY-SA |
Ruby | © 1993-2015 Yukihiro Matsumoto | Ruby |
Ruby on Rails | © 2004-2015 David Heinemeier Hansson Rails, Ruby on Rails, and the Rails logo are trademarks of David Heinemeier Hansson. | MIT |
Rust | © 2011-2015 The Rust Project Developers | MIT |
Sass | © 2006-2015 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein | MIT |
Sinon | © 2010-2015 Christian Johansen | BSD |
Socket.io | © 2014-2015 Automattic | MIT |
Symfony | © 2004-2015 Fabien Potencier | MIT |
Underscore.js | © 2009-2015 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors | MIT |
Vagrant | © 2010-2015 Mitchell Hashimoto | MIT |
Vue.js | © 2013-2015 Evan You, Vue.js contributors | MIT |
Webpack | © 2012-2015 Tobias Koppers | MIT |
Yii | © 2008-2015 by Yii Software LLC | BSD |
About devdocs, you can read this.
After improve these static web pages, I would do the app. If you like this, please wait the coming app.
自动完成一直都是一个没完没了的问题。谁能准确地说出当一个地球人在一个国家输入框输入“uni”打算选什么?可能会出现以下情况:
Of course, it’s probably not the last one, but that right there is a human intuition that we often forget to instil into these UI interactions.
当然,这可能不是最后一个,但人类的直觉通常会忘记灌输这些UI交互。
We can divine what the user probably intends most of the time but it’ll always be a game of heuristics. Most solutions shy away from this game, opting instead to match the query letter-for-letter in each potential value, and this is usually sufficient, but without any other logic not only will “la” match “Latvia” but also “Angola”. And usually “Ltvia” will match nothing whatsoever, even though it’s seemingly obvious what the user is trying to type.
我们可以推断用户在这上面花费的时间,显然这是一个启发式游戏。大多数解决方案都是尝试匹配字母之间可能潜在的值,通常已经足够了,“la” 没有任何逻辑,会匹配 “Latvia” 也会匹配 “Angola”。但绝不会匹配 “Ltvia”,尽管“Ltvia”看似是用户想要的输入类型。
If you try implementing a fuzzy matcher to solve this, the first revelation is that you can’t just boolean-match the query against the data like so many solutions do. You need to score each potential match. Hopefully, in the case of country selection, you end up with a sensible subset of countries that match the query to some reasonable degree. This scoring is necessary so that you know what you’re putting at the top of the list. When typing “U”, the user expects Ukraine or Uzbekistan sooner than Mauritius or Sudan, for example.
假如你想用模糊匹配去解决这个问题。首先,你不能像往常那样用boolean匹配去请求查询数据。你需要计算每个存在匹配的分数。 在选择国家情况下,你最终需要请求匹配查询一些合符逻辑的合理的国家子集。那么这分数是非常有必要的,以便我们可以把它们排到列表的顶部。
(译注, boolean匹配 https://github.com/bevacqua/fuzzysearch)
Oddly, if you looked at the most common autocompletion widget out there (jQuery UI), it doesn’t appear to follow this intuition.
奇怪的是,假如见过最常用的jQuery UI 自动完成(autocompletion)插件,他并没有解决这个问题。
Even the most graceful solutions tend to avoid the muddiness of dealing with mistakes like “untied states” or “leichtenstein”. Sure, the likeliness of a person having to type the name of a country they aren’t intimately familiar with is probably quite low, but people still make mistakes.
即使是最优雅的解决方案,都趋向于忽视模糊处理错误,像“untied states” (united states) 和 “leichtenstein” (liechtenstein )。当然, 就像一个人输入一个国家的名字,他们不熟悉这个国家可能性是非常很低。但人们仍然会犯错误。
I’ve been intrigued by this topic for quite a while and it’s why I originally made relevancy.js. It solves the problem quite well, I think, and it does so in a pretty transparent way, with scores applied for various qualities such as the index of the query within the target string (“king” scores higher than “dom” in “kingdom”, for example), but it’s still a quite a lot of code for such a tiny part of an overall user experience.
我一直被这个话题吸引了很长时间,也是我写 relevancy.js 的原因。它可以很好地处理类似问题,我想,他的原理是非常显而易见的,就是通过分数去区分不同品质(把分数应用于品质)例如,目标字符串查询的索引(正如在查询“kingdom” “的时候,king”的分数比 “dom”高)。但这还是需要相当多的代码去实现这小部分,相对于这样一个整体用户体验的而言。
I have once again been playing with this problem (thanks to a certain tweet) and have so wanted to come up with something stupefyingly graceful.
我又再一次遇上了这个问题(谢谢 tweet 的肯定),我想可以写的更加优雅。
It all starts with a scratch in back of your mind — the one that tells you that your time has come. The world requires you to use regular expressions.
一切从头开始在你的脑海里,这告诉你的时间到了。world requires you (世界需要你) 用正则表达式。
Warning: I don’t sincerely recommend doing any of this. It’s just a bit of fun. It’s probably an inefficient, unreliable, obscure and ultimately foolish endeavour!
Warning: 别分心,不然效率极低的,bebebe~~~
##我们入正题:
A static France might look like this:
静态字符串France 正则:
/^France$/
A more lenient France might be less sensitive to its case:
不区分大小写:
/^france$/i
We could then allow the characters to be optional too:
我们也可以允许字符是可选的
/^f?r?a?n?c?e?$/i
This would match “f” and “franc” and “FaE”, etc.
这会匹配 “f”, “franc” 和 “FaE”等等。
But… users make even more grievous mistakes sometimes, and our regular expression should be able to handle those. So let’s add a single character of leniency between each legitimate character, and at the beginning and end of the string:
但…… 用户有时犯更严重的错误,我们的正常表达应该能够处理这些错误。因此,让我们在每一个合理的字母之间加上一个宽容的字符在每个字符的前后。
/^.?f?.?r?.?a?.?n?.?c?.?e?.?$/i
But then this would allow contiguous mistakes like “fafafafa”. We only want to allow a single incorrect mistake after each successfully entered character. For this we can use groups to force each character to be matched and a lazy quantifier on the mistake character to ensure that legitimate characters get to successfully match.
但当这允许连续错误像“fafafafa”,我们只允许一个单一的错误在每个正确输入字符之后。为此我们可以使用分组强制每个字符匹配和懒惰匹配的错误的字符串,确保合法字符可以成功匹配。
So:
/f.?otherStuff/
Becomes:
/(?:f.??)?otherStuff/
In English: Try to match f
followed by otherStuff
. If impossible then try to match any character after f
but before otherStuff
. (This is why lazy quantifiers (e.g. ??
) are so useful!)
就是说,尝试匹配跟在otherStuff之前的f。如果不可能的话,尝试匹配任何在“f” 之后,在“otherStuff” 之前的字符。
The entire regex would become:
完整的正则表达式会变成:
/^(?:.(?=f))?(?:f.??)?(?:r.??)?(?:a.??)?(?:n.??)?(?:c.??)?(?:e.??)?$/i
We should probably capture each individual match (f
should be (f)
) so that we can analyze the result and score it appropriately.
我们可能需要捕获每个单独的匹配(f
应该是(f)
) ,以便我们可以分析结果并适当地评分。
var r = /^(?:(f).??)?(?:(r).??)?(?:(a).??)?(?:(n).??)?(?:(c).??)?(?:(e).??)?$/i
'f$R-aN_cEx'.match(r);
// => ["f$R-aN_cEx", "f", "R", "a", "N", "c", "E"]
The regular expression, broken down:
正则表达式解释:
/
^ # Start of string
(?: # Non-captured group(非捕获)
(f) # Match and capture 'f' (匹配并捕获"f")
.?? # Followed lazily by any character (懒惰匹配跟随字符)
)? # Entire group is optional (组可选)
(?: # Non-captured group
(r) # Match and capture 'r' (匹配并捕获"r")
.?? # Followed lazily by any character
)? # Entire group is optional
... # Etc.
$ # End of string
/i
A quick note: lazy or lazily in the context of regular expressions simply means that that thing will be intentionally excluded from the first match attempt and will only be used if the subsequent regular expression is unsuccessful without it.
小贴士:正则表达式中的"懒惰"(lazy/lazily)是指:该部分会被有意地排除在第一次匹配尝试之外,只有当后续的正则表达式在没有它的情况下匹配失败时,才会用到它。
One caveat with the above regex is that it doesn’t allow a mistake to be at the beginning of the string. We could fix this with a lookahead to the effect of “allow a mistake here as long as its followed by a non-mistake” but since “non-mistake” could effectively be any character in the legitimate string it’s easier to just make allowances for that initial mistake in each group. Additionally, we probably want to capture every single mistake, in addition to legitimate characters. Here’s our next iteration:
一个需要注意的是,上面的正则表达式,它不允许在字符串的开头有错误。我们可以修复这个前瞻错误通过 “允许犯错误,只要它跟在一个非错误后面”, 但由于 “非错误” 可以有效地在合法字符串中的任何字符,使得它更容易在每个组中出现所允许的初始错误。此外,除了合法的字符,我们还可能要捕捉每一个错误。下面是我们的实现:
/
^ # Start of string
(?: # Non-captured group
(^.)? # Captured optional mistake at the beginning of the string
# ===============================================
(f) # Match and capture 'f'
(.??) # Followed lazily by any character (captured)
)? # Entire group is optional
... # Etc.
$ # End of string
/i
The check (^.)?
has to be specified in each group, to account for mistakes that don’t involve “f”, like “krance” or “ttance”, etc.
检查(^.)?
必须每组都指定, 以便考虑错误不涉及到 “f”, 就像 “krance” 和 “ttance”, 等等。
Since we’re aiming to genericize this entire mess, we should create a generator that assembles the regular expression given any piece of text:
我们的目标是把这一整套逻辑通用化,所以我们需要构建一个生成器,为任意给定的文本组装出对应的正则表达式:
/**
 * Build a case-insensitive "fuzzy" matcher for `string`.
 * Every legitimate character becomes an optional captured group that
 * also allows (and captures) one possible mistake character before
 * and after it. Returns /^$/ for empty input.
 */
function makeFuzzyRegex(string) {
    if (!string) {
        return /^$/;
    }
    // Backslash-escape every non-word character so it matches literally.
    var escaped = string.replace(/\W/g, '\\$&');
    // Wrap each (possibly escaped) character in the fuzzy template:
    //   (^.)?  -> optional captured mistake at the start of the string
    //   ($1)   -> the legitimate character itself (captured)
    //   (.??)  -> lazily-matched, captured mistake after it
    var pattern = escaped.replace(/(\\?.)/g, '(?:(^.)?($1)(.??))?');
    return RegExp('^' + pattern + '$', 'i');
}
makeFuzzyRegex('omg');
// => /^(?:(^.)?(o)(.??))?(?:(^.)?(m)(.??))?(?:(^.)?(g)(.??))?$/i
This regex matched against ‘_o-m*g!’ produces:
[
// Full match:
"_o-m*g!",
// Captures:
"_", // Mistake
"o", // Legit
"-", // Mistake
undefined, // Void mistake
"m", // Legit
"*", // Mistake
undefined, // Void mistake
"g", // Legit
"!" // Mistake
]
The captures are in groups of three, with every second capture being the legitimate character (case-insensitive), and with every first and third potentially being mistakes.
捕获结果以三个为一组:每组的第二个捕获是合法字符(不区分大小写),而每组的第一个和第三个捕获则可能是错误字符。
We can then loop through these captures and apply weights as we see fit.
我们可以循环这些捕捉和应用权重作为我们合适的分数。
var fullMatch = makeFuzzyRegex('omg').exec('_o-m*g!');
var captures = fullMatch.slice(1); // Get captures specifically
var score = 0;
for (var i = 0, l = captures.length; i < l; i += 3) {
if (captures[i]) score -= 1;
if (captures[i+1]) score += 10;
if (captures[i+2]) score -= 1;
}
score; // => 26
That scoring is quite arbitrary, but we’ve at least prescribed our wish to score successes more than we punish mistakes (10 vs 1).
这个计分方式相当随意,但至少表达了我们的意图:对成功匹配的奖励要大于对错误的惩罚(10 vs 1)。
We can start to play with the heuristics of this if we wrap it all up:
我们把它封装起来,让Ta溜溜:
/**
 * Create a scoring function for fuzzy-matching `text`.
 * The returned function takes a query string and yields a number:
 * +10 per legitimate character matched, -1 per mistake character,
 * and 0 when the query cannot match at all.
 */
function createFuzzyScorer(text) {
    var fuzzy = buildMatcher(text);

    return function (query) {
        var result = fuzzy.exec(query);
        if (result === null) {
            return 0;
        }
        // Captures come in triples: [mistake, legit, mistake].
        var groups = result.slice(1);
        var total = 0;
        var i = 0;
        while (i < groups.length) {
            if (groups[i]) total -= 1;      // leading mistake
            if (groups[i + 1]) total += 10; // legitimate character
            if (groups[i + 2]) total -= 1;  // trailing mistake
            i += 3;
        }
        return total;
    };

    // Assemble the fuzzy regular expression for `string` (see the
    // article above for a breakdown of the generated pattern).
    function buildMatcher(string) {
        if (!string) {
            return /^$/;
        }
        // Escape any potential special (non-word) characters:
        var escaped = string.replace(/\W/g, '\\$&');
        return RegExp(
            '^' +
            escaped.replace(/(\\?.)/g, '(?:(^.)?($1)(.??))?') +
            '$',
            'i'
        );
    }
}
Our first attempt isn’t too bad:
我们的第一次尝试并不太坏:
var score = createFuzzyScorer('omg');
score('omg'); // => 30
score('xOmg'); // => 29
score('.o.m.g.'); // => 26
score('om'); // => 20
score('og'); // => 20
score('o'); // => 10
score('nope'); // => 0
These seem like sensible enough scores, generally, but we’re more interested in autocompletion, and so there’s an obvious predictive element there. If a user types ‘o’ then that should probably score higher than ‘g’ if we’re testing against ‘omg’, but with the above mechanism they both receive a standard 10:
这似乎是可感知的足够的分数,一般,但我们在自动完成更感兴趣,所以有一个明显的预测元素的存在。当我们我们测试‘omg’时,如果用户键入”o”那应该得分高于”g”,但上述方法都接受标准的10:
var score = createFuzzyScorer('omg');
score('o'); // => 10
score('g'); // => 10
We can fix this by applying a higher weight to matches that appear earlier in the string:
我们可以用更高的权重来解决字符串中较早出现的匹配问题:
// The scoring loop:
for (var i = 0, l = captures.length; i < l; i += 3) {
if (captures[i]) score -= 0.1;
if (captures[i+1]) score += (l - i) / l; // the magic
if (captures[i+2]) score -= 0.1;
}
Now the score given for any singular legitimate match will decrease as the index (i
) increases. Here are the results:
现在,任何单个合法字符匹配的得分都会随着索引(i)的增大而递减。结果如下:
var score = createFuzzyScorer('omg');
score('omg'); // => 1.99
score('xOmg'); // => 1.90
score('om'); // => 1.66
score('.o.m.g.'); // => 1.59
score('og'); // => 1.33
score('o'); // => 1.00
score('nope'); // => 0.00
This is getting closer to our intuition. The next step would be to try to create a real autocompletion widget. I’ve done it so I know that we’ll want to make one more change. The problem with our scoring right now is that it’ll award legitimate characters relative to the length of the string. But when comparing scores across multiple subject strings, this approach seems broken.
这是越来越接近我们的直觉。下一步将努力创造一个真正的自动完成功能部件。我已经这样做了,所以我知道,我们将要做一个改变。现在我们分值的问题是,奖励合法的字符相对于字符串的长度。但是,当比较多个同类字符串的分数时,这种方法似乎被打破。
createFuzzyScorer('RuneScript')('Ru'); // 1.9
createFuzzyScorer('Ruby')('Ru'); // 1.7
These should both score equally, as “Ru” is just as likely to become “Ruby” as it is to become “RuneScript”. To achieve this we should only take into account the index, and make the weight of any scoring decision inversely proportional to that index, in this case via an exponential taper (pow(index, -2)
).
显然,分数应该相等,因为 “Ru” 推演成 “Ruby” 和 “RuneScript” 是一样. 为了实现这一点,我们应该考虑到该指数,并使任何得分决定的权重与指数成反比,在这种情况下,通过指数锥度 (pow(index, -2)
)
// The scoring loop:
for (var i = 0, l = captures.length; i < l; i += 3) {
var relevancyOfCharacter = Math.pow(i + 1, -2);
if (captures[i]) score -= relevancyOfCharacter * 0.1;
if (captures[i+1]) score += relevancyOfCharacter * 1;
if (captures[i+2]) score -= relevancyOfCharacter * 0.1;
}
(Final version of createFuzzyScorer
available as a gist.)
See this demo using programming languages as the dataset. Try intentionally misspelling something (jawascript), or missing out characters (jaascit), or just going a little crazy (jahskt). It works beautifully.
在数据集用编程语言实现查看demo. 尝试拼写错误(jawascript),或者丢失字母(jaascit)或者输入一些类似的(jahskt), 它依然工作得很漂亮。
To achieve speedy sorting, a fuzzy scorer is created for every single value before the user types anything:
为了实现快速排序, 每一个单一的值的模糊分数应该在用户输入之前被创建:
var data = PROGRAMMING_LANGUAGES.map(function(lang, i) {
return {
actualValue: lang,
score: createFuzzyScorer(lang),
i: i,
toString: function() { return lang; }
};
});
This means we can iterate through data
on every relevant input event, and call the score()
method with the current query. We can then bundle this into a filter->sort->slice flow to get our list of sensible suggestions:
这意味着我们可以遍历每个相关的输入事件的 data ,调用 score() 方法来实现当前查询。我们可以把这变成一个 filter->sort->slice 得出我们列出的有判断力的建议:
var sorted = data.filter(function(item) {
// Get rid of any very unlikely matches (and cache the score!)
return (item._cachedScore = item.score(query)) >= .5;
}).sort(function(a, b) {
var as = a._cachedScore;
var bs = b._cachedScore;
// Sort by score, and if score is equal, then by original index:
// (We would typically return 0 in that case but some engines don't stable-sort)
return as > bs ? -1 : as == bs && a.i < b.i ? -1 : 1;
}).slice(0, 10); // We only need the top 10...
And.. we’re done. It’s never really finished though: you’ll find endless tweaks that can be made to the scorer to make it more believably resemble human-like intuition.
我们做到了,虽然它从来没有真正完成,你会发现很多的调整,使得分数更加人工智能。
For those wanting to test the resulting country autocompletion interaction: See the demo.
对于那些想要测试国家自动完成返回值交互的,See the demo.
I guess, despite my initial warning, I wouldn’t actually mind using this in production, as long as there were a decent number of unit tests. I’d probably also assemble the regular expressions on the server and serve them up as literals. It’s also worth mentioning that almost everything in this post has been exploring the fuzzy-matching of very short strings in small datasets. Even in the case of the country demo, to get more applicable results, I broke up long names into the component parts and then scored against each. E.g.
我想,尽管我最初的忠告,我不介意使用这个在生产,只要做一些数量的单位测试。我可能也组装正则表达式在服务器并为提供服务。值得一提的是,几乎所有的这篇文章都在探索小数据集上的模糊匹配。即使在国家输入案例的演示,以获得更适用的结果,我折断了长的名字,组成部分,然后对每一个得分。例如
// E.g. Macedonia, the Former Yugoslav Republic of:
var scorers = [
"Macedonia, the Former Yugoslav Republic of",
"Macedonia",
"the",
"former",
"yugoslav",
"republic",
"of"
].map(createFuzzyScorer);
// Etc.
And this would be terribly inefficient on a larger scale, so with any dataset longer than a list of countries you’re probably best to explore Trie-based approaches to autocompletion.
And with that, I’ll shut-up and wish you merry regex’ing!
然而这在大规模扫描下是非常低效的,所以任何数据集比的国家名单长的,你可能最好的探究 Trie-based 方法 来处理自动完成。然后,我会闭嘴, wish you merry regex’ing!
译者注:
文中及其评论提到的资源:
https://github.com/padolsey/relevancy.js
https://github.com/bevacqua/fuzzysearch
https://github.com/gf3/Levenshtein
作者github:https://github.com/padolsey
译者:主要用到正则懒惰匹配去实现真值
线性回归。you are truly, how much truly you are!
就是通过匹配度分数进行排序,而不是单单的boolean 值指定,再a-z 排序输出,
https://github.com/bevacqua/fuzzysearch所用的方法,你只能在true or false 之间选择,而padolsey所用的方法字样引入了权重概念更加智能,但是在大规模数据下是非常低效的。另外在作者的博客评论中也提到的 https://github.com/gf3/Levenshtein, 可以自行wiki一下。
最后如有漏译,错译,欢迎指正。
]]>list :: show currently installed rubies, interactive output.
install :: install one or many ruby versions
use :: setup current shell to use a specific ruby version
详细帮助文档(rvm -h):
= rvm
* https://rvm.io/
* https://github.com/wayneeseguin/rvm/
== DESCRIPTION:
RVM is the Ruby enVironment Manager (rvm).
It manages Ruby application environments and enables switching between them.
== Installation
curl -L https://get.rvm.io | bash -s stable --autolibs=enabled [--ruby] [--rails] [--trace]
stable :: Install stable RVM, good for servers.
--ruby :: Additionally install latest ruby version (MRI).
--rails :: Install gem rails into default gemset (also installs ruby=).
--trace :: Print trace of the installation, gist output when you have problems.
--autolibs :: Enable or disable autolibs see: https://rvm.io/rvm/autolibs
More details here: https://rvm.io/rvm/install/
== Usage
rvm [Flags] [Options] Action [Implementation[,Implementation[,...]]
== Flags
--default :: with 'rvm use X', sets the default ruby for new shells to X.
--debug :: Toggle debug mode on for very verbose output.
--disable-binary :: Install from source instead of using binaries
--trace :: Toggle trace mode on to see EVERYTHING rvm is doing.
--force :: Force install, removes old install & source before install.
--summary :: Used with rubydo to print out a summary of the commands run.
--latest :: with gemset --dump skips version strings for latest gem.
--gems :: with uninstall/remove removes gems with the interpreter.
--docs :: with install, attempt to generate ri after installation.
--reconfigure :: Force ./configure on install even if Makefile already exists.
--skip-gemsets :: with install, skip the installation of default gemsets.
--quiet-curl :: Makes curl silent when fetching data
== Options
-v|--version :: Emit rvm version loaded for current shell
-l|--level :: patch level to use with rvm use / install
--bin :: path for binaries to be placed (~/.rvm/bin/)
-S :: Specify a script file to attempt to load and run (rubydo)
-e :: Execute code from the command line.
--gems :: Used to set the 'gems_flag', use with 'remove' to remove
gems
--archive :: Used to set the 'archive_flag', use with 'remove' to remove
archive
--patch :: With MRI Rubies you may specify one or more full paths to
patches
for multiple, specify comma separated:
--patch /.../.../a.patch[%prefix],/.../.../.../b.patch
'prefix' is an optional argument, which will be bypassed
to the '-p' argument of the 'patch' command. It is separated
from patch file name with '%' symbol.
-C|--configure :: custom configure options. If you need to pass several
configure options then append them comma separated:
-C --...,--...,--...
--nice :: process niceness (for slow computers, default 0)
--ree-options :: Options passed directly to ree's './installer' on the
command line.
--with-rubies :: Specifies a string for rvm to attempt to expand for set
operations.
== Action
(Note: for most actions, 'rvm help action-name' may provide more information.)
*usage* :: show this usage information
version :: show the rvm version installed in rvm_path
use :: setup current shell to use a specific ruby version
reload :: reload rvm source itself (useful after changing rvm source)
implode :: (seppuku) removes the rvm installation completely.
This means everything in $rvm_path (~/.rvm || /usr/local/rvm).
This does not touch your profiles. However, this means that you
must manually clean up your profiles and remove the lines which
source RVM.
get :: {head,stable} upgrades rvm to latest head or stable version.
Check 'rvm help get' for more details.
(If you experience bugs try this first with head version, then
ask for help in #rvm on irc.freenode.net and hang around)
reset :: remove current and stored default & system settings.
(If you experience odd behavior try this second)
info :: show the *current* environment information for current ruby
current :: print the *current* ruby version and the name of any gemset
being used.
debug :: show info plus additional information for common issues
install :: install one or many ruby versions
See also: https://rvm.io/rubies/installing/
uninstall :: uninstall one or many ruby versions, leaves their sources
remove :: uninstall one or many ruby versions and remove their sources
reinstall :: reinstall ruby and runs gem pristine on all gems,
make sure to read output, use 'all' for all rubies.
migrate :: Lets you migrate all gemsets from one ruby to another.
upgrade :: Lets you upgrade from one version of a ruby to another, including
migrating your gemsets semi-automatically.
wrapper :: generates a set of wrapper executables for a given ruby with the
specified ruby and gemset combination. Used under the hood for
passenger support and the like.
cleanup :: Lets you remove stale source folders / archives and other
miscellaneous data associated with rvm.
repair :: Lets you repair parts of your environment e.g. wrappers, env
files and and similar files (e.g. general maintenance).
fix-permissions :: Repairs broken permissions (e.g. by sudo or chef)
osx-ssl-certs :: Helps update certificates for OpenSSL installed by rvm on OSX.
snapshot :: Lets you backup / restore an rvm installation in a lightweight
manner.
alias :: Lets you set shortcut strings for convenience with 'rvm use'.
disk-usage :: Tells you how much disk space rvm install is using.
tools :: Provides general information about the ruby environment,
primarily useful when scripting rvm.
docs :: Tools to make installing ri and rdoc documentation easier.
rvmrc :: Tools related to managing rvmrc trust and loading.
patchset :: Tools related to managing ruby patchsets.
do :: runs an arbitrary command against specified and/or all rubies
cron :: Manages setup for using ruby in cron tasks.
gemset :: gemsets: https://rvm.io/gemsets/
rubygems :: Switches the installed version of rubygems for the current ruby.
config-get :: display values for RbConfig::CONFIG variables.
gemdir :: display the path to the current gem directory (GEM_HOME).
fetch :: Performs an archive / src fetch only of the selected ruby.
list :: show currently installed rubies, interactive output.
https://rvm.io/rubies/list/
autolibs :: Controls settings for automatically installing dependencies.
pkg :: Install a dependency package {readline,iconv,zlib,openssl}
https://rvm.io/packages/
notes :: Display notes, with operating system specifics.
export :: Temporarily set an environment variable in the current shell.
unexport :: Undo changes made to the environment by 'rvm export'.
requirements :: Installs additional OS specific dependencies/requirements for
building various rubies. Usually run by install.
mount :: Install rubies from external locations.
user :: Tools for managing RVM mixed mode in multiuser installations.
group :: Tools for managing groups in multiuser installations.
== Implementation
*ruby* :: MRI/YARV Ruby (The Gold Standard) {1.8.x, 1.9.x, 2.0.x, 2.1.x, 2.2.x}
jruby :: JRuby, Ruby interpreter on the Java Virtual Machine.
rbx :: Rubinius
ree :: Ruby Enterprise Edition, MRI Ruby with several custom
patches for performance, stability, and memory.
macruby :: MacRuby, insanely fast, can make real apps (Mac OS X Only).
maglev :: GemStone Ruby, awesome persistent ruby object store.
ironruby :: IronRuby, NOT supported yet. Looking for volunteers to help.
system :: use the system ruby (eg. pre-rvm state)
default :: use the default ruby (or the system ruby if a default hasn't been set).
https://rvm.io/rubies/default/
== Resources:
https://rvm.io/
https://www.pivotaltracker.com/projects/26822
== Contributions:
Any and all contributions offered in any form, past present or future, to the
RVM project are understood to be in complete agreement and acceptance with the
Apache License v2.0.
== LICENSE:
Copyright (c) 2009-2011 Wayne E. Seguin
Copyright (c) 2011-2015 Michal Papis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Action documentation available with 'rvm help action-name':
alias autolibs cleanup config-get cron current debug disk-usage do docs fetch fix-permissions gemdir gemset gemset/copy get group implode info install list migrate mount notes osx-ssl-certs patchset pkg prepare reinstall remove repair requirements reset rubygems rvmrc rvmrc/to rvmrc/warning snapshot srcdir tools uninstall upgrade use user wrapper
Other topics of documentation available with 'rvm help topic':
alt color upgrade-notes
For additional information please visit RVM's documentation website:
https://rvm.io/
If you still cannot find an answer to your question, find
'wayneeseguin', 'mpapis', 'richo', 'lemoinem' or 'rys' in #rvm on irc.freenode.net:
http://webchat.freenode.net/?channels=rvm
]]>假如想省钱,只是写写博客而已。免去服务器麻烦。推荐你用octopress,可以肯定的,没有wordpress 那么多功能。octopress 是基于 jekyll 的 静态化博客,可以在github pages 和 gitcafe 等上运行。下面我们来部署一下 我们的A blogging framework for hackers.
博客。
先看一下技术栈:
octopress -> jekyll -> ruby
Markdown (or Textile), Liquid, HTML & CSS go in. Static sites come out ready for deployment.
Liquid 一个 ruby模板引擎,Markdown是……。
所以 安装octopress之前必不可少的是安装Ta的生态环境。
打开rvm首页,打开终端,使用以下命令安装吧。
$ gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
$ \curl -sSL https://get.rvm.io | bash -s stable
$ rvm -h
$ rvm list
$ rvm install 2.2.1
$ rvm use 2.2.1
某些时候会提示,请使用 –bash-login. 使用就OK了。
安装完成后可以用ruby –version进行验证
安装Ruby完成后就按照官方指南安装Octpress
#clone octopress
$ git clone git://github.com/imathis/octopress.git octopress
$ cd octopress
##安装依赖
$ gem install bundler
$ bundle install
##安装octopress默认主题
$ rake install
bundle 命令 是 Gemfile 文件用的,和 nodejs 项目的 package.json 类似,都是一键安装依赖包文件。
接下来需要把Blog部署到github上去,第一步要做的是去github创建一个username.github.io
的repo,比如我的就叫icai.github.io
。
然后运行以下命令,并依照提示完成github和Octopress的关联
$ rake setup_github_pages
$ rake generate
$ rake deploy
rake 命令 是 Rakefile 文件用的,你在带有Rakefile 目录下执行 rake 命令,
当然你可以 rake -T
查看当前所有的rake Task, 或者 rake -h 获取帮助
把生成后的代码上传到github
$ git add .
$ git commit -m 'create blog'
$ git push origin source
完成后等待一段时间后就能访问http://username.github.io
看到自己的博客了
配置文件路径为~/octopress/_config.yml
参考: 官方
编辑完成后
$ rake generate
$ git add .
$ git commit -m "settings"
$ git push origin source
$ rake deploy
Octopress有许多第三方主题可以选择,首先在这里上寻找喜欢的主题,点击进入对应主题的git,一般在readme上都会有安装流程
#这里以安装allenhsu定制的greyshade主题为例,原作者是shashankmehta
$ git clone git@github.com:allenhsu/greyshade.git .themes/greyshade
#Substitute 'color' with your highlight color
$ echo "\$greyshade: color;" >> sass/custom/_colors.scss
$ rake "install[greyshade]"
$ rake generate
$ git add .
$ git commit -m "theme"
$ git push origin source
$ rake deploy
目前版本的Octopress会在/source/blog/categories
下创建一个index.markdown
来作为分类的首页,但这个首页在标签有中文时会出现无法跳转的情况,原因是因为在出现中文标签时Octopress会把文件的路径中的中文转换成拼音,而在Category跳转时是直接写了中文路径,结果自然是404。解决方法是自己实现一个分类首页并处理中文。
首先按照这里的方法实现index.html
将plugins/category_list_tag.rb
中的
category_url = File.join(category_dir, category.gsub(/_|\P{Word}/, '-').gsub(/-{2,}/, '-').downcase)
替换成
category_url = File.join(category_dir, category.to_url.downcase)
这样你的博客就可以支持中文标签的跳转了。
经过上面几部后,博客已经成功搭建,现在就可以开始写博文了。
#如果用的是终端
$ rake new_post['title']
#如果用的是ZSH
$ rake "new_post[title]"
#或者
$ rake new_post\['title'\]
生成的文件在~/source/_posts
目录下
#...markdown写博文
$ rake preview #localhost:4000
$ rake generate
$ git add .
$ git commit -m "comment"
$ git push origin source
$ rake deploy
var radian = angle * Math.PI / 180;
var angle = radian* 180/ Math.PI;
alert(radian/ angle == Math.PI / 180);
// Math.atan2()是弧度制
var radian = Math.atan2((p1.y-p0.y),(p1.x-p0.x));
//————
//已知角度degree(0,360),半径(raduis),求圆上的点(p1)
var radian = angle * Math.PI / 180;
p1.x = raduis * Math.cos(radian) + p0.x;
p1.y = raduis * Math.sin(radian) + p0.y;
/**
 * Return the point at `angle` degrees and distance `radius` from the
 * origin point `p0`.
 * @param {{x:number,y:number}} p0 - centre of the circle
 * @param {number} radius - circle radius
 * @param {number} angle - angle in degrees, measured from the +x axis
 * @returns {{x:number,y:number}} the point on the circle
 */
function getPointAt(p0, radius, angle) {
    // Convert degrees to radians before using the trig functions.
    var radian = angle * Math.PI / 180;
    // BUG FIX: the body previously read the misspelled `raduis`, which
    // is undefined inside this function and threw a ReferenceError.
    var p1 = {};
    p1.x = radius * Math.cos(radian) + p0.x;
    p1.y = radius * Math.sin(radian) + p0.y;
    return p1;
}
//———-
//两点A,B,求角度degree(0,360),A为Origin(p0).
var radian = Math.atan2((p1.y-p0.y),(p1.x-p0.x));
var angle = radian * 180 / Math.PI;
//———
//已知两点A(p0),B(p1),求AB连线上距离其中一点的D0 or D1的坐标
// ….
都是高中数学
参考:
https://zh.wikipedia.org/wiki/%E6%9E%81%E5%9D%90%E6%A0%87%E7%B3%BB
https://zh.wikipedia.org/wiki/%E7%AC%9B%E5%8D%A1%E5%84%BF%E5%9D%90%E6%A0%87%E7%B3%BB
应用场景:
]]>javascript与C,C++,Java等语言,他们之间总让人感觉存在一层纱,javascript易入门,难深入,而其他则相反(个人观点)。
其实不论什么语言,难与易的突破口都在语法基础上,当你越学得深入,你会发现语言之间的相同是如此的美妙。其实个人感觉javascript是相对其他语言是最容易学的一种语言。但部分人确认为,javascript十分难学,什么兼容性都把你搞死了。个人认为应该把javascript的难与易一分为二。javascript的语言是相对其他语言简单的,而且没有过多的语法概念(泛型,多态,模板),让编程者留下很多可以思考和想象的空间。或许多人喜欢javascript就是因为它的灵活性。
javascript是一门松散的面向对象脚本语言
为什么说是松散的呢?javascript一切皆对象(var),就是说不论数组,函数……都是继承自对象。
javascript给我们所展现对象继承存储其实就像treegrid数据类似,但是正好相反。
就是说javascript对象继承存储和javascript静态方法存储是相反的。
图:treegrid数据
图:javascript对象继承
图:javascript静态方法
图:javascript Object layout
上面所说的其实就是javascript对象构成,或许还会困惑,下面将深入分析。
什么叫做javascript对象引用?简单地说就是非实体对象赋值,即是非String,Number,Array,Null,Undefined等对象(除Object对象)赋值。
// 引用
var source = {
warm:function(v){
alert(v)
}
};
var quote = source;
delete source.warm;
quote.warm('b');
上面代码会报错,因为source是非实体对象,quote内存地址指向source,当删除source.warm的时候,quote下的warm随之消失。
对于上述的代码要想不报错的话,就必须对source.warm也赋值给quote.warm,因为source.warm是实体对象(Function)。
对对象进行遍历,子对象一对一赋值就叫做javascript对象复制。既然是遍历,自然有深浅度。所以我们对彻底遍历叫做深拷贝,只遍历子对象第一层叫做浅拷贝。
(1)
var source = {
warm:function(v){
alert(v)
}
};
// Shallow copy: assign each own enumerable property of `source` onto
// `target`, then return `target`. Inherited properties are skipped.
// Nested objects are copied by reference only.
function extend(target, source) {
    for (var key in source) {
        if (Object.prototype.hasOwnProperty.call(source, key)) {
            target[key] = source[key];
        }
    }
    return target;
}
var exObj = extend({},source);
delete source.warm;
exObj.warm('b');
(2)上述代码不会报错,因为source通过遍历已经把其子对象复制到exObj下了。
var source = {
warm:function(v){
alert(v)
},
childs:{
log:function(v){
alert('log'+v);
},
info:function(v){
alert('info'+v);
}
},
_private:{
_name:'source',
_age:12
}
};
// Shallow copy: assigns each own enumerable property of `source` onto
// `target` and returns `target`. Nested objects are copied by
// reference only — which is exactly what the surrounding example
// demonstrates.
function extend(target, source) {
for (var n in source) {
// Skip properties inherited through the prototype chain.
if(source.hasOwnProperty(n)){
target[n] = source[n]
}
}
return target
}
var exObj = extend({},source);
会发生错误,为什么呢?上面同样是复制,当我们执行如下代码:
delete source.childs.log;
delete source.childs;
exObj.childs.log('b');
同理,我们执行如下:就是我们一开始所说遍历的深度不彻底,我们通过上述extend方法只遍历了source.childs,而source.childs是非实体对象,所以只是充当exObj.childs对象的引用。
exOBj._private._age++;
source._private._age; // 13
我们要实现深拷贝的话,就必须 判断其子对象是否是非实体对象,假如是非实体对象的话,就进行递归遍历操作。我们改变exOBj._private._age的时候,source._private.age也发生改变,道理和上面的一样。
还记得jquery.extend api吗
jQuery.extend( [deep ], target, object1 [, objectN ] )
当我们第一个参数传入true的时候,就帮我我们实施深拷贝操作。
学过java或者c++,php等语言的读者就好理解了,刚开始前言的
图:javascript对象继承 和 图:javascript Object layout就说的很详细了。那么他们是怎么实现的呢?
其实javascript中还有一个很重要的概念就是作用域。
编程语言中的继承就是子类继承父类已有的方法,属性,并做相应的拓展,进而形成自己的具有特定属性和方法的类。
开始学面向对象语言的时候,通常都会举一个经典例子,就是动物父类,派生鸟类,鱼类……,而鸟类接着派生出鹰,麻雀……。
而javascript(ecma5)还没有给出官方的方法进行上述操作而已。但是后续(ecma 6)将会支持。正如一开始所说,javascript是一门松散的面向对象语言,留给编程者很多思考的空间。
javascript中有两个很重要的方法就是call,apply。人们常谈的javascript设计模式常常都离不开call,apply,回调函数等等。
那么如何才能实现(图:javascript对象继承)中类似的对象继承?
接着动物的例子:
what is animal? 它们能够对环境作出反应并移动,捕食其他生物。
// Base "class" for the inheritance demo: an animal can catch other
// creatures and protect itself.
function animal(){
}
// Shared behaviour lives on the prototype so every instance reuses it.
animal.prototype = {
// Hunt another creature (body intentionally left empty in this demo).
catchOther:function(){
},
// Defend itself (body intentionally left empty in this demo).
protectSelf:function(){
}
}
鸟类:
// Subclass constructor: a bird is an animal that can also fly.
function bird(){
}
// `wrapper` (defined later in the article) rebinds `bird` to a new
// constructor whose prototype chain inherits from animal.prototype.
bird = wrapper(bird,animal);
// Extend the subclass with its own behaviour after wiring inheritance.
bird.prototype.fly = function(){
console.log('flying');
}
wrapper??用来继承,如何写才能实现如下呢?
new 一个 animal ??并指向bird.prototype ?
好像可以实现,但是当我们打入
bird.prototype.__proto__.constructor的时候是什么呢??构造函数是否符合对象的构造函数的定义呢?
我们给出一个简单的方案,至于构造函数应该执行谁的问题留作思考,网上也很多关于这方面的讨论?自己可以搜一下
// Classic prototypal-inheritance helper: returns a constructor whose
// instances run `child`'s constructor logic yet inherit from
// `parent.prototype` through an intermediate empty constructor, so
// building the prototype never invokes `parent` itself.
function wrapper(child, parent) {
    function Surrogate() {}
    Surrogate.prototype = parent.prototype;

    function Combined() {
        // Delegate all construction work to the original child.
        child.apply(this, arguments);
    }
    Combined.prototype = new Surrogate();
    return Combined;
}
所谓的静态方法就是不涉及抽象对象函数实现的方法,举个很简单的例子,如jquery中的$.isFunction,$.isArray等等就属于静态方法。同样是上面的例子
四、javascript对象静态方法
var source = {
warm:function(v){
alert(v)
},
childs:{
log:function(v){
alert('log'+v);
},
info:function(v){
alert('info'+v);
}
},
_private:{
_name:'source',
_age:12
}
};
// Shallow copy helper used by the "static methods" example: assigns
// each own enumerable property of `source` onto `target` and returns
// `target`. Nested objects remain shared references.
function extend(target, source) {
for (var n in source) {
// Skip properties inherited through the prototype chain.
if(source.hasOwnProperty(n)){
target[n] = source[n]
}
}
return target
}
var exObj = extend({},source);
这里不合理的地方在于其成员对所有人都可操作:假如上述是公共方法,团队每个人都可以对其调用,并且假如warm方法会改变其兄弟属性_private。如果_private下的是标识变量(flag 等),那么这个标识变量就失去了操作的唯一性。
所谓的对象多态就是和构造函数或者方法参数传递进行动态识别处理。
依然是$.extend,还记得api有多少调用吗?
java中的多态是不同参数类型都重新定义一遍,好像十分费劲?javascript留给编程者的思考空间就是给你一个arguments,调用当前方法的参数集合,是一个类数组。
一般地:
// Sketch of duck-typed "polymorphism": inspect the runtime arguments
// and branch on their value/type. `doxxx` is pseudo-code for the
// branch-specific handling, so this fragment is illustrative only.
function(a){
// NOTE(review): this likely intends `[].slice.call(arguments, 0)` —
// `[].slice(arguments, 0)` slices the empty array and ignores
// `arguments`, always yielding []. Confirm against the original post.
var args = [].slice(arguments,0);
if(args[0] == 'xx')doxxx;
if(typeof args[0] == 'function') doxxx;
}
相关阅读:是不是很有意思呢。
1、javascript权威指南 源代码分析(一)Objects
2、javascript权威指南 源代码分析(二)Functions
3、(JavaScript中proto与prototype的关系)
后记:
码字真累,百度空间蛋疼啊,写到一半按了不知道什么键,没有了一大截……
有高亮代码时,不要按ctrl+Z,不然你的人生十分钟会浪费掉的。
2013/10/19
本文来源
菜籽油: http://hi.baidu.com/tp100/item/4d59a03b4b73fac42f8ec25e
-EOF-
]]>