Best JavaScript code snippet using best
roibatchLoader.py
Source:roibatchLoader.py
1"""The data layer used during training to train a Fast R-CNN network.2"""3from __future__ import absolute_import4from __future__ import division5from __future__ import print_function6import torch.utils.data as data7from PIL import Image8import torch9from model.utils.config import cfg10from roi_data_layer.minibatch import get_minibatch, get_minibatch11from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes12import numpy as np13import random14import time15import pdb16class roibatchLoader(data.Dataset):17 def __init__(self, roidb, ratio_list, ratio_index, batch_size, num_classes, training=True, normalize=None):18 self._roidb = roidb19 self._num_classes = num_classes20 # we make the height of image consistent to trim_height, trim_width21 self.trim_height = cfg.TRAIN.TRIM_HEIGHT22 self.trim_width = cfg.TRAIN.TRIM_WIDTH23 self.max_num_box = cfg.MAX_NUM_GT_BOXES24 self.training = training25 self.normalize = normalize26 self.ratio_list = ratio_list27 self.ratio_index = ratio_index28 self.batch_size = batch_size29 self.data_size = len(self.ratio_list)30 # given the ratio_list, we want to make the ratio same for each batch.31 self.ratio_list_batch = torch.Tensor(self.data_size).zero_()32 num_batch = int(np.ceil(len(ratio_index) / batch_size))33 for i in range(num_batch):34 left_idx = i*batch_size35 right_idx = min((i+1)*batch_size-1, self.data_size-1)36 if ratio_list[right_idx] < 1:37 # for ratio < 1, we preserve the leftmost in each batch.38 target_ratio = ratio_list[left_idx]39 elif ratio_list[left_idx] > 1:40 # for ratio > 1, we preserve the rightmost in each batch.41 target_ratio = ratio_list[right_idx]42 else:43 # for ratio cross 1, we make it to be 1.44 target_ratio = 145 self.ratio_list_batch[left_idx:(right_idx+1)] = target_ratio46 def __getitem__(self, index):47 if self.training:48 index_ratio = int(self.ratio_index[index])49 else:50 index_ratio = index51 # get the anchor index for current sample index52 # here we set the anchor index to the last one53 # sample in this group54 minibatch_db = [self._roidb[index_ratio]]55 blobs = get_minibatch(minibatch_db, self._num_classes)56 data = torch.from_numpy(blobs['data'])57 im_info = torch.from_numpy(blobs['im_info'])58 # we need to random shuffle the bounding box.59 data_height, data_width = data.size(1), data.size(2)60 if self.training:61 np.random.shuffle(blobs['gt_boxes'])62 gt_boxes = torch.from_numpy(blobs['gt_boxes'])63 ########################################################64 # padding the input image to fixed size for each group #65 ########################################################66 # NOTE1: need to cope with the case where a group cover both conditions. (done)67 # NOTE2: need to consider the situation for the tail samples. (no worry)68 # NOTE3: need to implement a parallel data loader. 
(no worry)69 # get the index range70 # if the image need to crop, crop to the target size.71 ratio = self.ratio_list_batch[index]72 if self._roidb[index_ratio]['need_crop']:73 if ratio < 1:74 # this means that data_width << data_height, we need to crop the75 # data_height76 min_y = int(torch.min(gt_boxes[:,1]))77 max_y = int(torch.max(gt_boxes[:,3]))78 trim_size = int(np.floor(data_width / ratio))79 if trim_size > data_height:80 trim_size = data_height 81 box_region = max_y - min_y + 182 if min_y == 0:83 y_s = 084 else:85 if (box_region-trim_size) < 0:86 y_s_min = max(max_y-trim_size, 0)87 y_s_max = min(min_y, data_height-trim_size)88 if y_s_min == y_s_max:89 y_s = y_s_min90 else:91 y_s = np.random.choice(range(y_s_min, y_s_max))92 else:93 y_s_add = int((box_region-trim_size)/2)94 if y_s_add == 0:95 y_s = min_y96 else:97 y_s = np.random.choice(range(min_y, min_y+y_s_add))98 # crop the image99 data = data[:, y_s:(y_s + trim_size), :, :]100 # shift y coordiante of gt_boxes101 gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)102 gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)103 # update gt bounding box according the trip104 gt_boxes[:, 1].clamp_(0, trim_size - 1)105 gt_boxes[:, 3].clamp_(0, trim_size - 1)106 else:107 # this means that data_width >> data_height, we need to crop the108 # data_width109 min_x = int(torch.min(gt_boxes[:,0]))110 max_x = int(torch.max(gt_boxes[:,2]))111 trim_size = int(np.ceil(data_height * ratio))112 if trim_size > data_width:113 trim_size = data_width 114 box_region = max_x - min_x + 1115 if min_x == 0:116 x_s = 0117 else:118 if (box_region-trim_size) < 0:119 x_s_min = max(max_x-trim_size, 0)120 x_s_max = min(min_x, data_width-trim_size)121 if x_s_min == x_s_max:122 x_s = x_s_min123 else:124 x_s = np.random.choice(range(x_s_min, x_s_max))125 else:126 x_s_add = int((box_region-trim_size)/2)127 if x_s_add == 0:128 x_s = min_x129 else:130 x_s = np.random.choice(range(min_x, min_x+x_s_add))131 # crop the image132 data = data[:, :, x_s:(x_s + trim_size), :]133 # shift x coordiante of gt_boxes134 gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)135 gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)136 # update gt bounding box according the trip137 gt_boxes[:, 0].clamp_(0, trim_size - 1)138 gt_boxes[:, 2].clamp_(0, trim_size - 1)139 # based on the ratio, padding the image.140 if ratio < 1:141 # this means that data_width < data_height142 trim_size = int(np.floor(data_width / ratio))143 padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \144 data_width, 3).zero_()145 padding_data[:data_height, :, :] = data[0]146 # update im_info147 im_info[0, 0] = padding_data.size(0)148 # print("height %d %d \n" %(index, anchor_idx))149 elif ratio > 1:150 # this means that data_width > data_height151 # if the image need to crop.152 padding_data = torch.FloatTensor(data_height, \153 int(np.ceil(data_height * ratio)), 3).zero_()154 padding_data[:, :data_width, :] = data[0]155 im_info[0, 1] = padding_data.size(1)156 else:157 trim_size = min(data_height, data_width)158 padding_data = torch.FloatTensor(trim_size, trim_size, 3).zero_()159 padding_data = data[0][:trim_size, :trim_size, :]160 # gt_boxes.clamp_(0, trim_size)161 gt_boxes[:, :4].clamp_(0, trim_size)162 im_info[0, 0] = trim_size163 im_info[0, 1] = trim_size164 # check the bounding box:165 not_keep = (gt_boxes[:,0] == gt_boxes[:,2]) | (gt_boxes[:,1] == gt_boxes[:,3])166 keep = torch.nonzero(not_keep == 0).view(-1)167 gt_boxes_padding = torch.FloatTensor(self.max_num_box, gt_boxes.size(1)).zero_()168 if keep.numel() != 0:169 gt_boxes = 
gt_boxes[keep]170 num_boxes = min(gt_boxes.size(0), self.max_num_box)171 gt_boxes_padding[:num_boxes,:] = gt_boxes[:num_boxes]172 else:173 num_boxes = 0174 # permute trim_data to adapt to downstream processing175 padding_data = padding_data.permute(2, 0, 1).contiguous()176 im_info = im_info.view(3)177 return padding_data, im_info, gt_boxes_padding, num_boxes178 else:179 data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)180 im_info = im_info.view(3)181 gt_boxes = torch.FloatTensor([1,1,1,1,1])182 num_boxes = 0183 return data, im_info, gt_boxes, num_boxes184 def __len__(self):...
sts_xyter_settrim_crate.py
Source:sts_xyter_settrim_crate.py
#!/usr/bin/python

import time
import sys
import logging
import uhal

sys.path.append("../../../lib")
import global_dev_ctrl as gdc
import flim_dev_ctrl as fdc
import sts_xyter_dev_ctrl as sxdc
import numpy as np
import os
import sts_xyter_settings as settings
from os import path

log = logging.getLogger()
# This is the global level; it sets the minimum level which can be reported
log.setLevel(logging.DEBUG)

sh = logging.StreamHandler(sys.stderr)
sh.setLevel(logging.INFO)
log.addHandler(sh)

fh = logging.FileHandler("./logs/" + sys.argv[0].replace('py', 'log'), 'w')
fh.setLevel(logging.DEBUG)
fmt = logging.Formatter('[%(levelname)s] %(message)s')
fh.setFormatter(fmt)
log.addHandler(fh)

uhal.setLogLevelTo(uhal.LogLevel.WARNING)
manager = uhal.ConnectionManager( settings.xml_filename )

hw = []
for edpb in settings.edpb_names:
    hw.append( manager.getDevice( edpb ) )

sts_com = []
flims = []
afck_id = []
afck_mac = []

for edpb_idx in range( 0, len(hw) ):
    sts_com.append( sxdc.sts_xyter_com_ctrl( hw[ edpb_idx ], "sts_xyter_dev") )
    flims.append( fdc.flim_dev_ctrl( hw[ edpb_idx ], "flim_ctrl_dev") )
    sts_com[ edpb_idx ].read_public_dev_info()
    afck_id.append( gdc.get_afck_id( hw[ edpb_idx ], "global_dev") )
    afck_mac.append( gdc.get_afck_mac( hw[ edpb_idx ], "global_dev") )
    log.info("\nAFCK ID: 0x%04x", afck_id[ edpb_idx ] )
    log.info("\nAFCK MAC: %s", afck_mac[ edpb_idx ] )

sts_ifaces = [[]]
for edpb_idx in range( 0, len(hw) ):
    sts_ifaces.append( [] )
    # iface_no: xyter_addr
    for i in range(0, sts_com[edpb_idx].interface_count()):
        if settings.iface_active[edpb_idx][i] == 1:
            sts_ifaces[edpb_idx].append(sxdc.sts_xyter_iface_ctrl(sts_com[edpb_idx],
                i, settings.sts_addr_map[edpb_idx][i], afck_id[edpb_idx]))

    for sts_iface in sts_ifaces[edpb_idx]:
        sts_iface.set_link_break(settings.LINK_BREAK)

    print("This script assumes that:")
    print("- AFCK clocks are already configured")
    print("- eLink calibration results are available")

    log.info("Start to configure AFCK")
    lmask = ((1 << 5) - 1) ^ settings.LINK_BREAK
    lmask |= (lmask << 5)

    for sts_iface in sts_ifaces[edpb_idx]:
        sts_iface.fast_sync( settings.LINK_BREAK )

    for sts_iface in sts_ifaces[edpb_idx]:
        sts_iface.EncMode.write(sxdc.MODE_FRAME)
        # Set the link mask accordingly
        sts_iface.emg_write(192, 25, lmask)

    for sts_iface in sts_ifaces[edpb_idx]:
        print("\n### Setting trim values for STSXYTER #%d ###") % ( sts_iface.iface )
        if ("XXXXXX_XXXX" == settings.date[ edpb_idx ][ sts_iface.iface ] ):
            print("No date/time defined for the trim file in the settings for this board")
            print("=> Do nothing!!! Check your settings; this board is marked active in settings")
            continue

        sts_iface.emg_write(192, 25, lmask)

        # ---------------------------------------------------------------------------------------
        testch = 58
        test_thr = 128
        test_delta_ch = 5
        test_npulse = 100
        setdisc_flag = 0
        #vref_n = 28   # Vref_N AGH: 31 Test: 22
        #vref_p = 56   # Vref_P AGH: 48 Test: 51
        #vref_t = 188  # Vref_T AGH: 188 Test: 184  bit7: enable 5..0: threshold
        read_nword = 500
        shslowfs = 0  # 0,..,3 for FS=90,160,220,280ns
        # ---------------------------------------------------------------------------------------

        #-----------------------------------------------------------------------------------------
        # SETTINGS for TRIM
        #-----------------------------------------------------------------------------------------
        trim_offset = 0
        ch_min = 0
        ch_max = 128
        d_min = 0
        d_max = 31
        trim_ok_min = 0
        trim_ok_max = 255
        trim_ok_avg = 0
        trim_ok_n = 0
        trim_corr_flag = 1

        # Holes from get_trim.py for vacuum feb-c (globtop)
        filename_trim_g = "trim_cal/" \
            "trim_cal_200707_gsi_feb_c_89_fast_39_adc_522217948_200.0_" \
            "holes.txt"
        # Holes from get_trim.py for vacuum feb-c (globtop), wide range
        filename_trim_gw = "trim_cal/" \
            "trim_cal_200709_gsi_feb_c_89_fast_14_adc_582218515_210.0_" \
            "holes.txt"
        # Holes from get_trim.py for box feb-c (blue cover)
        filename_trim_b = "trim_cal/" \
            "trim_cal_200615_gsi_feb_c_89_fast_39_adc_522217948_200.0_" \
            "holes.txt"
        # Holes from trim_sts.py for vacuum feb-c with Si-Junction, MUCH mode
        filename_trim_si = "trim_cal/" \
            "trim_cal_200714_gsi_feb_c_89_fast_22_adc_582218430_250.0_" \
            "holes.txt"
        # Holes from trim_sts.py for vacuum feb-c, 241Am test
        filename_trim_am = "trim_cal/" \
            "trim_cal_201120_gsi_feb_c_89_fast_32_adc_584618535_66.0_" \
            "holes.txt"

        # Decide which calibration to load
        filename_trim = filename_trim_g
        assert path.exists(filename_trim)
        data = np.genfromtxt( filename_trim )
        trim = [ [0 for d in range(d_min, d_max+1)] for ch in range(ch_min, ch_max)]
        #-------------------------------------------------------------------------------------
        # END of SETTINGS
        #-------------------------------------------------------------------------------------

        chn = data[:,1]
        disc_thr = data[:,][:,2:34]

        ## reading the trim file
        for ch in range(ch_min, ch_max):
            for d in range(d_min, d_max+1):
                trim[ch][d] = int(disc_thr[:,d][ch:ch+1])
                if (trim[ch][d] < 0):
                    trim[ch][d] = 0
                if (trim[ch][d] > 255):
                    trim[ch][d] = 255
        print "\n"

        ## correct trim outliers
        if (trim_corr_flag == 1):
            for ch in range(ch_min, ch_max):
                trim_ok_avg = 0
                trim_ok_n = 0
                for d in range(d_min, d_max+1):
                    if ((trim[ch][d] >= trim_ok_min) and (trim[ch][d] <= trim_ok_max)):
                        trim_ok_avg += trim[ch][d]
                        trim_ok_n += 1
                trim_ok_avg = int(trim_ok_avg/trim_ok_n + 0.5)
                for d in range(d_min, d_max):
                    if ((trim[ch][d] < trim_ok_min) or (trim[ch][d] > trim_ok_max)):
                        print "Corrected channel ", '{:4d}'.format(ch), " disc: ", '{:3d}'.format(d), " from ", trim[ch][d], " to ", '{:3d}'.format(trim_ok_avg), "\n"
                        trim[ch][d] = trim_ok_avg
        #
        '''
        print " -------------Trim default values--------------"
        print " "
        for ch in range(ch_min, ch_max):
            print "ch", '{:4d}'.format(ch),
            for d in range(d_min, d_max):
                print '{:4d}'.format(trim[ch][d]),
            print "\n"
        print " "
        print " "
        '''
        print " -------------Writing trim values---------------"
        for ch in range(ch_min, ch_max):
            print "ch: ", ch,
            for d in range(d_min, d_max):
                set_val_trim = trim[ch][d] + trim_offset
                if (set_val_trim < 0):
                    set_val_trim = 0
                if (set_val_trim > 255):
                    set_val_trim = 255
                print set_val_trim,
                disc = 61 - 2*d
                sts_iface.write_check(ch, disc, set_val_trim)
            print trim[ch][31],
            sts_iface.write_check(ch, 67, trim[ch][31])
            print str(sts_iface.read(ch, 67) & 0xff)
            #print "\n"
        print " "
        print "<<<------------ DONE: set trim values ------------->>>"

        #print " "
        #print " "
        #print ">>------------ READING trim values ---------------<<"
        #for ch in range(ch_min, ch_max):
            #print "\nch: ", ch,
            #for d in range(d_min, d_max):
                #disc = 61 - 2*d
                #val_f = sts_iface.read(ch, disc) & 0xff
                #print '{:4d}'.format(val_f),
            #print '{:4d}'.format(sts_iface.read(ch, 67) & 0xff),
...
audio-selector.jsx
Source:audio-selector.jsx
import React from 'react';
import PropTypes from 'prop-types';
import bindAll from 'lodash.bindall';

import AudioSelectorComponent from '../components/audio-trimmer/audio-selector.jsx';
import {getEventXY} from '../lib/touch-utils';
import DragRecognizer from '../lib/drag-recognizer';

const MIN_LENGTH = 0.01;
const MIN_DURATION = 500;

class AudioSelector extends React.Component {
    constructor (props) {
        super(props);
        bindAll(this, [
            'handleNewSelectionMouseDown',
            'handleTrimStartMouseDown',
            'handleTrimEndMouseDown',
            'handleTrimStartMouseMove',
            'handleTrimEndMouseMove',
            'handleTrimStartMouseUp',
            'handleTrimEndMouseUp',
            'storeRef'
        ]);
        this.state = {
            trimStart: props.trimStart,
            trimEnd: props.trimEnd
        };
        this.clickStartTime = 0;
        this.trimStartDragRecognizer = new DragRecognizer({
            onDrag: this.handleTrimStartMouseMove,
            onDragEnd: this.handleTrimStartMouseUp,
            touchDragAngle: 90,
            distanceThreshold: 0
        });
        this.trimEndDragRecognizer = new DragRecognizer({
            onDrag: this.handleTrimEndMouseMove,
            onDragEnd: this.handleTrimEndMouseUp,
            touchDragAngle: 90,
            distanceThreshold: 0
        });
    }
    componentWillReceiveProps (newProps) {
        const {trimStart, trimEnd} = this.props;
        if (newProps.trimStart === trimStart && newProps.trimEnd === trimEnd) return;
        this.setState({
            trimStart: newProps.trimStart,
            trimEnd: newProps.trimEnd
        });
    }
    clearSelection () {
        this.props.onSetTrim(null, null);
    }
    handleNewSelectionMouseDown (e) {
        const {width, left} = this.containerElement.getBoundingClientRect();
        this.initialTrimEnd = (getEventXY(e).x - left) / width;
        this.initialTrimStart = this.initialTrimEnd;
        this.props.onSetTrim(this.initialTrimStart, this.initialTrimEnd);
        this.clickStartTime = Date.now();
        this.containerSize = width;
        this.trimEndDragRecognizer.start(e);
        e.preventDefault();
    }
    handleTrimStartMouseMove (currentOffset, initialOffset) {
        const dx = (currentOffset.x - initialOffset.x) / this.containerSize;
        const newTrim = Math.max(0, Math.min(1, this.initialTrimStart + dx));
        if (newTrim > this.initialTrimEnd) {
            this.setState({
                trimStart: this.initialTrimEnd,
                trimEnd: newTrim
            });
        } else {
            this.setState({
                trimStart: newTrim,
                trimEnd: this.initialTrimEnd
            });
        }
    }
    handleTrimEndMouseMove (currentOffset, initialOffset) {
        const dx = (currentOffset.x - initialOffset.x) / this.containerSize;
        const newTrim = Math.min(1, Math.max(0, this.initialTrimEnd + dx));
        if (newTrim < this.initialTrimStart) {
            this.setState({
                trimStart: newTrim,
                trimEnd: this.initialTrimStart
            });
        } else {
            this.setState({
                trimStart: this.initialTrimStart,
                trimEnd: newTrim
            });
        }
    }
    handleTrimStartMouseUp () {
        this.props.onSetTrim(this.state.trimStart, this.state.trimEnd);
    }
    handleTrimEndMouseUp () {
        // If the selection was made quickly (tooFast) and is small (tooShort),
        // deselect instead. This allows click-to-deselect even if you drag
        // a little bit by accident. It also allows very quickly making a
        // selection, as long as it is above a minimum length.
        const tooFast = (Date.now() - this.clickStartTime) < MIN_DURATION;
        const tooShort = (this.state.trimEnd - this.state.trimStart) < MIN_LENGTH;
        if (tooFast && tooShort) {
            this.clearSelection();
        } else {
            this.props.onSetTrim(this.state.trimStart, this.state.trimEnd);
        }
    }
    handleTrimStartMouseDown (e) {
        this.containerSize = this.containerElement.getBoundingClientRect().width;
        this.trimStartDragRecognizer.start(e);
        this.initialTrimStart = this.props.trimStart;
        this.initialTrimEnd = this.props.trimEnd;
        e.stopPropagation();
        e.preventDefault();
    }
    handleTrimEndMouseDown (e) {
        this.containerSize = this.containerElement.getBoundingClientRect().width;
        this.trimEndDragRecognizer.start(e);
        this.initialTrimEnd = this.props.trimEnd;
        this.initialTrimStart = this.props.trimStart;
        e.stopPropagation();
        e.preventDefault();
    }
    storeRef (el) {
        this.containerElement = el;
    }
    render () {
        return (
            <AudioSelectorComponent
                containerRef={this.storeRef}
                playhead={this.props.playhead}
                trimEnd={this.state.trimEnd}
                trimStart={this.state.trimStart}
                onNewSelectionMouseDown={this.handleNewSelectionMouseDown}
                onTrimEndMouseDown={this.handleTrimEndMouseDown}
                onTrimStartMouseDown={this.handleTrimStartMouseDown}
            />
        );
    }
}

AudioSelector.propTypes = {
    onSetTrim: PropTypes.func,
    playhead: PropTypes.number,
    trimEnd: PropTypes.number,
    trimStart: PropTypes.number
};
...
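The container above never stores a committed selection itself; it reports selections through `onSetTrim` and mirrors the `trimStart`/`trimEnd` props back into local state while dragging. A minimal sketch of a parent that owns those values is shown below; the component name, import path, and fixed playhead are illustrative assumptions, not part of the original file.

// Hypothetical parent component that owns the committed trim values.
import React from 'react';
import AudioSelector from '../containers/audio-selector.jsx'; // path is an assumption

class SoundEditorExample extends React.Component {
    constructor (props) {
        super(props);
        this.state = {trimStart: null, trimEnd: null, playhead: 0};
        this.handleSetTrim = this.handleSetTrim.bind(this);
    }
    handleSetTrim (trimStart, trimEnd) {
        // Values arrive normalized to 0..1, or null/null when the selection is cleared.
        this.setState({trimStart, trimEnd});
    }
    render () {
        return (
            <AudioSelector
                playhead={this.state.playhead}
                trimStart={this.state.trimStart}
                trimEnd={this.state.trimEnd}
                onSetTrim={this.handleSetTrim}
            />
        );
    }
}

export default SoundEditorExample;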
contact.server.model.js
Source:contact.server.model.js
'use strict';

/**
 * Module dependencies.
 */
var mongoose = require('mongoose'),
    Schema = mongoose.Schema;

/**
 * Contact Schema
 */
var ContactSchema = new Schema({
    Name: {type: String, default: '', required: 'Name', trim: true},
    Code: {type: Number, trim: true},
    Type: {type: String, default: '', required: '', trim: true},
    ContactType: {type: String, default: '', required: '', trim: true},
    MailingName: {type: String, default: '', required: '', trim: true},
    Salutation: {type: String, default: '', required: '', trim: true},
    TAN: {type: String, default: '', required: '', trim: true},
    PAN: {type: String, default: '', required: '', trim: true},
    TIN: {type: String, default: '', required: '', trim: true},
    ServiceTaxNumber: {type: String, default: '', required: '', trim: true},
    AssignedToPartner: {type: String, default: '', required: '', trim: true},
    AssignedToManager: {type: String, default: '', required: '', trim: true},
    AssignedToEntities: {type: String, default: '', required: '', trim: true},
    AssignedToBranchLocation: {type: String, default: '', required: '', trim: true},
    PostalAddressAddressee: {type: String, default: '', required: '', trim: true},
    PostalAddressAddress: {type: String, default: '', required: '', trim: true},
    PostalAddressCity: {type: String, default: '', required: '', trim: true},
    PostalAddressState: {type: String, default: '', required: '', trim: true},
    PostalAddressPostcode: {type: Number, trim: true},
    PostalAddressCountry: {type: String, default: '', required: '', trim: true},
    CommunicationsWorkPhone: {type: Number, trim: true},
    CommunicationsMobile: {type: Number, trim: true},
    CommunicationsSkype: {type: String, default: '', required: '', trim: true},
    CommunicationsHomePhone: {type: Number, trim: true},
    CommunicationsFax: {type: Number, trim: true},
    CommunicationsTwitter: {type: String, default: '', required: '', trim: true},
    CommunicationsEmail: {type: String, default: '', required: '', trim: true},
    CommunicationsLinkedIn: {type: String, default: '', required: '', trim: true},
    CommunicationsWebsite: {type: String, default: '', required: '', trim: true},
    MoreAbouttheContactTaxYearEnd: {type: String, default: '', required: '', trim: true},
    MoreAbouttheContactClientType: {type: String, default: '', required: '', trim: true},
    MoreAbouttheContactClientTypeSubcategory: {type: String, default: '', required: '', trim: true},
    NoofEmployees: {type: Number, trim: true},
    Inbusinesssince: {type: String, default: '', required: '', trim: true},
    AnnualAccountsSchedulingAnnualAccountsMonth: {type: String, default: '', required: '', trim: true},
    ClientHistoryClientFrom: {type: String, default: '', required: '', trim: true},
    ClientHistoryClientUntil: {type: String, default: '', required: '', trim: true},
    created: {type: Date, default: Date.now},
    user: {type: Schema.ObjectId, ref: 'User'}
});
...
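Two details of this schema are easy to misread: `required: ''` is a falsy value, so in Mongoose only `Name` (with `required: 'Name'`) is actually enforced, and `trim: true` is a String setter, so it has no effect on the Number paths. The sketch below shows how such a schema is typically consumed; it assumes the (truncated) model file registers the schema as `'Contact'`, and the connection string and field values are hypothetical.

// Sketch of typical usage (assumptions noted above).
var mongoose = require('mongoose');

// Assuming the model file registers the schema, e.g. mongoose.model('Contact', ContactSchema);
var Contact = mongoose.model('Contact');

mongoose.connect('mongodb://localhost/contacts-dev'); // hypothetical connection string

var contact = new Contact({
    Name: 'Acme Traders',                         // the only effectively required field
    CommunicationsEmail: '  info@acme.example  '  // trim: true strips the padding on save
});

contact.save(function (err) {
    if (err) { return console.error(err); }
    console.log('Saved contact:', contact.Name, contact.CommunicationsEmail);
});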
string-trim.js
Source:string-trim.js
1description("This test checks String.trim(), String.trimLeft() and String.trimRight() methods.");2//references to trim(), trimLeft() and trimRight() functions for testing Function's *.call() and *.apply() methods3var trim = String.prototype.trim;4var trimLeft = String.prototype.trimLeft;5var trimRight = String.prototype.trimRight;6var testString = 'foo bar';7var trimString = '';8var leftTrimString = '';9var rightTrimString = '';10var wsString = '';11var whitespace = [12 {s : '\u0009', t : 'HORIZONTAL TAB'},13 {s : '\u000A', t : 'LINE FEED OR NEW LINE'},14 {s : '\u000B', t : 'VERTICAL TAB'},15 {s : '\u000C', t : 'FORMFEED'},16 {s : '\u000D', t : 'CARRIAGE RETURN'},17 {s : '\u0020', t : 'SPACE'},18 {s : '\u00A0', t : 'NO-BREAK SPACE'},19 {s : '\u2000', t : 'EN QUAD'},20 {s : '\u2001', t : 'EM QUAD'},21 {s : '\u2002', t : 'EN SPACE'},22 {s : '\u2003', t : 'EM SPACE'},23 {s : '\u2004', t : 'THREE-PER-EM SPACE'},24 {s : '\u2005', t : 'FOUR-PER-EM SPACE'},25 {s : '\u2006', t : 'SIX-PER-EM SPACE'},26 {s : '\u2007', t : 'FIGURE SPACE'},27 {s : '\u2008', t : 'PUNCTUATION SPACE'},28 {s : '\u2009', t : 'THIN SPACE'},29 {s : '\u200A', t : 'HAIR SPACE'},30 {s : '\u3000', t : 'IDEOGRAPHIC SPACE'},31 {s : '\u2028', t : 'LINE SEPARATOR'},32 {s : '\u2029', t : 'PARAGRAPH SEPARATOR'},33 {s : '\u200B', t : 'ZERO WIDTH SPACE (category Cf)'}34];35for (var i = 0; i < whitespace.length; i++) {36 shouldBe("whitespace["+i+"].s.trim()", "''");37 shouldBe("whitespace["+i+"].s.trimLeft()", "''");38 shouldBe("whitespace["+i+"].s.trimRight()", "''");39 wsString += whitespace[i].s;40}41trimString = wsString + testString + wsString;42leftTrimString = testString + wsString; //trimmed from the left43rightTrimString = wsString + testString; //trimmed from the right44 45shouldBe("wsString.trim()", "''");46shouldBe("wsString.trimLeft()", "''");47shouldBe("wsString.trimRight()", "''");48shouldBe("trimString.trim()", "testString");49shouldBe("trimString.trimLeft()", "leftTrimString");50shouldBe("trimString.trimRight()", "rightTrimString");51shouldBe("leftTrimString.trim()", "testString");52shouldBe("leftTrimString.trimLeft()", "leftTrimString");53shouldBe("leftTrimString.trimRight()", "testString");54 55shouldBe("rightTrimString.trim()", "testString");56shouldBe("rightTrimString.trimLeft()", "testString");57shouldBe("rightTrimString.trimRight()", "rightTrimString");58var testValues = ["0", "Infinity", "NaN", "true", "false", "({})", "({toString:function(){return 'wibble'}})", "['an','array']"];59for (var i = 0; i < testValues.length; i++) {60 shouldBe("trim.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");61 shouldBe("trimLeft.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");62 shouldBe("trimRight.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");...
audio-trimmer.jsx
Source:audio-trimmer.jsx
import React from 'react';
import PropTypes from 'prop-types';
import bindAll from 'lodash.bindall';

import AudioTrimmerComponent from '../components/audio-trimmer/audio-trimmer.jsx';
import DragRecognizer from '../lib/drag-recognizer';

const MIN_LENGTH = 0.01; // Used to stop sounds being trimmed smaller than 1%

class AudioTrimmer extends React.Component {
    constructor (props) {
        super(props);
        bindAll(this, [
            'handleTrimStartMouseDown',
            'handleTrimEndMouseDown',
            'handleTrimStartMouseMove',
            'handleTrimEndMouseMove',
            'storeRef'
        ]);
        this.trimStartDragRecognizer = new DragRecognizer({
            onDrag: this.handleTrimStartMouseMove,
            touchDragAngle: 90,
            distanceThreshold: 0
        });
        this.trimEndDragRecognizer = new DragRecognizer({
            onDrag: this.handleTrimEndMouseMove,
            touchDragAngle: 90,
            distanceThreshold: 0
        });
    }
    handleTrimStartMouseMove (currentOffset, initialOffset) {
        const dx = (currentOffset.x - initialOffset.x) / this.containerSize;
        const newTrim = Math.max(0, Math.min(this.props.trimEnd - MIN_LENGTH, this.initialTrim + dx));
        this.props.onSetTrimStart(newTrim);
    }
    handleTrimEndMouseMove (currentOffset, initialOffset) {
        const dx = (currentOffset.x - initialOffset.x) / this.containerSize;
        const newTrim = Math.min(1, Math.max(this.props.trimStart + MIN_LENGTH, this.initialTrim + dx));
        this.props.onSetTrimEnd(newTrim);
    }
    handleTrimStartMouseDown (e) {
        this.containerSize = this.containerElement.getBoundingClientRect().width;
        this.trimStartDragRecognizer.start(e);
        this.initialTrim = this.props.trimStart;
        e.stopPropagation();
        e.preventDefault();
    }
    handleTrimEndMouseDown (e) {
        this.containerSize = this.containerElement.getBoundingClientRect().width;
        this.trimEndDragRecognizer.start(e);
        this.initialTrim = this.props.trimEnd;
        e.stopPropagation();
        e.preventDefault();
    }
    storeRef (el) {
        this.containerElement = el;
    }
    render () {
        return (
            <AudioTrimmerComponent
                containerRef={this.storeRef}
                playhead={this.props.playhead}
                trimEnd={this.props.trimEnd}
                trimStart={this.props.trimStart}
                onTrimEndMouseDown={this.handleTrimEndMouseDown}
                onTrimStartMouseDown={this.handleTrimStartMouseDown}
            />
        );
    }
}

AudioTrimmer.propTypes = {
    onSetTrimEnd: PropTypes.func,
    onSetTrimStart: PropTypes.func,
    playhead: PropTypes.number,
    trimEnd: PropTypes.number,
    trimStart: PropTypes.number
};
...
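The trim handles report fractions of the clip length (0..1), and the `MIN_LENGTH` clamp keeps `trimStart` and `trimEnd` at least 1% apart. A short sketch of how those fractions could be applied to an actual audio buffer follows; the helper name `applyTrim` and the mono-only handling are assumptions for illustration, not part of this component.

// Hypothetical helper: convert normalized trim values into sample indices.
const applyTrim = (audioBuffer, trimStart, trimEnd) => {
    // trimStart/trimEnd are fractions of the clip, so MIN_LENGTH = 0.01 above
    // guarantees at least 1% of the samples survive the trim.
    const startSample = Math.floor(trimStart * audioBuffer.length);
    const endSample = Math.ceil(trimEnd * audioBuffer.length);
    const channelData = audioBuffer.getChannelData(0); // mono for simplicity
    return channelData.slice(startSample, endSample);  // Float32Array of the kept region
};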
Using AI Code Generation
var BestBuy = require('bestbuy')(process.env.BEST_BUY_API_KEY);

BestBuy.products('', {page: 1, pageSize: 1, show: 'sku,name,salePrice,shortDescription,image', sort: 'name.asc', type: 'all', format: 'json'})
    .then(function (data) {
        console.log(data);
    })
    .catch(function (err) {
        console.error(err);
    });
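The snippet above calls the Products endpoint with an empty query, which simply pages through the catalog. A hedged variation with a keyword query is sketched below; the `'search=...'` query syntax and the response fields (`data.products`, `salePrice`) follow the Best Buy Products API conventions but are assumptions here, not confirmed by the original snippet.

// Sketch: same client, keyword search instead of an empty query (assumptions noted above).
var bestbuy = require('bestbuy')(process.env.BEST_BUY_API_KEY);

bestbuy.products('search=headphones', {show: 'sku,name,salePrice', pageSize: 5, format: 'json'})
    .then(function (data) {
        data.products.forEach(function (product) {
            console.log(product.sku, product.name, product.salePrice);
        });
    })
    .catch(function (err) {
        console.error(err);
    });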
Using AI Code Generation
var request = require('request');

var options = {
    headers: {
    }
};

request(options, function (error, response, body) {
    if (!error && response.statusCode == 200) {
        var info = JSON.parse(body);
        console.log(info);
    }
});
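The page stripped the request URL and header values out of this snippet, so as written it will not run. A completed sketch of the same pattern is below, using a placeholder endpoint and header that are purely illustrative; note also that the `request` package is deprecated, though it still works as shown.

// Hypothetical completion of the pattern above; URL and header are placeholders.
var request = require('request');

var options = {
    url: 'https://api.example.com/items',  // placeholder, not from the original snippet
    headers: {
        'User-Agent': 'request'            // many APIs require a User-Agent; assumption
    }
};

request(options, function (error, response, body) {
    if (!error && response.statusCode === 200) {
        console.log(JSON.parse(body));
    }
});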
Using AI Code Generation
var bestMatch = require('best-match');
var input = ' a b c d e f g h i j k l m n o p q r s t u v w x y z ';
var output = bestMatch.trim(input);
console.log(output);

var bestMatch = require('best-match');
var input = 'a b c d e f g h i j k l m n o p q r s t u v w x y z';
var output = bestMatch.toTitleCase(input);
console.log(output);

var bestMatch = require('best-match');
var input = 'a b c d e f g h i j k l m n o p q r s t u v w x y z';
var output = bestMatch.toCamelCase(input);
console.log(output);

var bestMatch = require('best-match');
var input = 'a b c d e f g h i j k l m n o p q r s t u v w x y z';
var output = bestMatch.toPascalCase(input);
console.log(output);

var bestMatch = require('best-match');
...
Using AI Code Generation
var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.trim(str));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.trimEnd(str));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.trimStart(str));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.truncate(str, 5));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.unescape(str));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.upperCase(str));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.upperFirst(str));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.words(str));

var bestString = require('beststring');
var str = " Hello World! ";
console.log(bestString.wrap(str, '(', ')'));
Using AI Code Generation
var BestString = require('best-string');
var str = new BestString(' Hello World! ');
console.log(str.trim());

var str = new BestString(string, [options]);

##### trim()
##### trimLeft()
##### trimRight()
##### replaceAll(searchValue, replaceValue)
##### startsWith(searchString)
##### endsWith(searchString)
##### contains(searchString)
##### containsAny(searchString)
##### containsAll(searchString)
##### indexOfAny(searchString)
##### indexOfAll(searchString)
##### lastIndexOfAny(searchString)
##### lastIndexOfAll(searchString)
##### toLowerCase()
##### toUpperCase()
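Most of the methods listed above map directly onto built-in String methods in modern JavaScript, so no wrapper library is strictly required for them. The sketch below shows the native equivalents; `includes` stands in for the `contains` entry in the list, and the `*Any`/`*All` variants have no single built-in counterpart.

// Native String equivalents for most of the methods listed above.
const s = ' Hello World! ';

console.log(s.trim());                      // 'Hello World!'
console.log(s.trimStart());                 // 'Hello World! '  (trimLeft is a legacy alias)
console.log(s.trimEnd());                   // ' Hello World!'  (trimRight is a legacy alias)
console.log(s.replaceAll('l', 'L'));        // ' HeLLo WorLd! ' (ES2021)
console.log(s.trim().startsWith('Hello'));  // true
console.log(s.trim().endsWith('!'));        // true
console.log(s.includes('World'));           // true
console.log(s.toLowerCase(), s.toUpperCase());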