Best Syzkaller code snippet using vmware.Diagnose
cni-antrea.go
Source:cni-antrea.go
1package cloudprovider2var AntreaDefaultApplyConfigs = []string {3`apiVersion: v14kind: ServiceAccount5metadata:6 labels:7 app: antrea8 name: antctl9 namespace: kube-system10`,11`apiVersion: v112kind: ServiceAccount13metadata:14 labels:15 app: antrea16 name: antrea-agent17 namespace: kube-system18`,19`apiVersion: v120kind: ServiceAccount21metadata:22 labels:23 app: antrea24 name: antrea-controller25 namespace: kube-system26`,27`apiVersion: rbac.authorization.k8s.io/v128kind: ClusterRole29metadata:30 labels:31 app: antrea32 rbac.authorization.k8s.io/aggregate-to-admin: "true"33 rbac.authorization.k8s.io/aggregate-to-edit: "true"34 name: aggregate-antrea-clustergroups-edit35rules:36- apiGroups:37 - core.antrea.tanzu.vmware.com38 resources:39 - clustergroups40 verbs:41 - get42 - list43 - watch44 - create45 - update46 - patch47 - delete48`,49`apiVersion: rbac.authorization.k8s.io/v150kind: ClusterRole51metadata:52 labels:53 app: antrea54 rbac.authorization.k8s.io/aggregate-to-view: "true"55 name: aggregate-antrea-clustergroups-view56rules:57- apiGroups:58 - core.antrea.tanzu.vmware.com59 resources:60 - clustergroups61 verbs:62 - get63 - list64 - watch65`,66`apiVersion: rbac.authorization.k8s.io/v167kind: ClusterRole68metadata:69 labels:70 app: antrea71 rbac.authorization.k8s.io/aggregate-to-admin: "true"72 rbac.authorization.k8s.io/aggregate-to-edit: "true"73 name: aggregate-antrea-policies-edit74rules:75- apiGroups:76 - security.antrea.tanzu.vmware.com77 resources:78 - clusternetworkpolicies79 - networkpolicies80 verbs:81 - get82 - list83 - watch84 - create85 - update86 - patch87 - delete88`,89`apiVersion: rbac.authorization.k8s.io/v190kind: ClusterRole91metadata:92 labels:93 app: antrea94 rbac.authorization.k8s.io/aggregate-to-view: "true"95 name: aggregate-antrea-policies-view96rules:97- apiGroups:98 - security.antrea.tanzu.vmware.com99 resources:100 - clusternetworkpolicies101 - networkpolicies102 verbs:103 - get104 - list105 - watch106`,107`apiVersion: rbac.authorization.k8s.io/v1108kind: ClusterRole109metadata:110 labels:111 app: antrea112 rbac.authorization.k8s.io/aggregate-to-admin: "true"113 rbac.authorization.k8s.io/aggregate-to-edit: "true"114 name: aggregate-traceflows-edit115rules:116- apiGroups:117 - ops.antrea.tanzu.vmware.com118 resources:119 - traceflows120 verbs:121 - get122 - list123 - watch124 - create125 - update126 - patch127 - delete128`,129`apiVersion: rbac.authorization.k8s.io/v1130kind: ClusterRole131metadata:132 labels:133 app: antrea134 rbac.authorization.k8s.io/aggregate-to-view: "true"135 name: aggregate-traceflows-view136rules:137- apiGroups:138 - ops.antrea.tanzu.vmware.com139 resources:140 - traceflows141 verbs:142 - get143 - list144 - watch145`,146`apiVersion: rbac.authorization.k8s.io/v1147kind: ClusterRole148metadata:149 labels:150 app: antrea151 name: antctl152rules:153- apiGroups:154 - controlplane.antrea.tanzu.vmware.com155 - networking.antrea.tanzu.vmware.com156 resources:157 - networkpolicies158 - appliedtogroups159 - addressgroups160 verbs:161 - get162 - list163- apiGroups:164 - stats.antrea.tanzu.vmware.com165 resources:166 - networkpolicystats167 - antreaclusternetworkpolicystats168 - antreanetworkpolicystats169 verbs:170 - get171 - list172- apiGroups:173 - system.antrea.tanzu.vmware.com174 resources:175 - controllerinfos176 - agentinfos177 verbs:178 - get179- apiGroups:180 - system.antrea.tanzu.vmware.com181 resources:182 - supportbundles183 verbs:184 - get185 - post186- apiGroups:187 - system.antrea.tanzu.vmware.com188 resources:189 - 
supportbundles/download190 verbs:191 - get192- nonResourceURLs:193 - /agentinfo194 - /addressgroups195 - /appliedtogroups196 - /loglevel197 - /networkpolicies198 - /ovsflows199 - /ovstracing200 - /podinterfaces201 verbs:202 - get203`,204`apiVersion: rbac.authorization.k8s.io/v1205kind: ClusterRole206metadata:207 labels:208 app: antrea209 name: antrea-agent210rules:211- apiGroups:212 - ""213 resources:214 - nodes215 verbs:216 - get217 - watch218 - list219- apiGroups:220 - ""221 resources:222 - pods223 verbs:224 - get225 - watch226 - list227 - patch228- apiGroups:229 - ""230 resources:231 - endpoints232 - services233 verbs:234 - get235 - watch236 - list237- apiGroups:238 - discovery.k8s.io239 resources:240 - endpointslices241 verbs:242 - get243 - watch244 - list245- apiGroups:246 - clusterinformation.antrea.tanzu.vmware.com247 resources:248 - antreaagentinfos249 verbs:250 - get251 - create252 - update253 - delete254- apiGroups:255 - controlplane.antrea.tanzu.vmware.com256 - networking.antrea.tanzu.vmware.com257 resources:258 - networkpolicies259 - appliedtogroups260 - addressgroups261 verbs:262 - get263 - watch264 - list265- apiGroups:266 - controlplane.antrea.tanzu.vmware.com267 resources:268 - nodestatssummaries269 verbs:270 - create271- apiGroups:272 - controlplane.antrea.tanzu.vmware.com273 resources:274 - networkpolicies/status275 verbs:276 - create277 - get278- apiGroups:279 - authentication.k8s.io280 resources:281 - tokenreviews282 verbs:283 - create284- apiGroups:285 - authorization.k8s.io286 resources:287 - subjectaccessreviews288 verbs:289 - create290- apiGroups:291 - ""292 resourceNames:293 - extension-apiserver-authentication294 resources:295 - configmaps296 verbs:297 - get298 - list299 - watch300- apiGroups:301 - ""302 resourceNames:303 - antrea-ca304 resources:305 - configmaps306 verbs:307 - get308 - watch309 - list310- apiGroups:311 - ops.antrea.tanzu.vmware.com312 resources:313 - traceflows314 - traceflows/status315 verbs:316 - get317 - watch318 - list319 - update320 - patch321 - create322 - delete323`,324`apiVersion: rbac.authorization.k8s.io/v1325kind: ClusterRole326metadata:327 labels:328 app: antrea329 name: antrea-controller330rules:331- apiGroups:332 - ""333 resources:334 - nodes335 - pods336 - namespaces337 - services338 verbs:339 - get340 - watch341 - list342- apiGroups:343 - networking.k8s.io344 resources:345 - networkpolicies346 verbs:347 - get348 - watch349 - list350- apiGroups:351 - clusterinformation.antrea.tanzu.vmware.com352 resources:353 - antreacontrollerinfos354 verbs:355 - get356 - create357 - update358 - delete359- apiGroups:360 - clusterinformation.antrea.tanzu.vmware.com361 resources:362 - antreaagentinfos363 verbs:364 - list365 - delete366- apiGroups:367 - authentication.k8s.io368 resources:369 - tokenreviews370 verbs:371 - create372- apiGroups:373 - authorization.k8s.io374 resources:375 - subjectaccessreviews376 verbs:377 - create378- apiGroups:379 - ""380 resourceNames:381 - extension-apiserver-authentication382 resources:383 - configmaps384 verbs:385 - get386 - list387 - watch388- apiGroups:389 - ""390 resourceNames:391 - antrea-ca392 resources:393 - configmaps394 verbs:395 - get396 - update397- apiGroups:398 - apiregistration.k8s.io399 resourceNames:400 - v1alpha1.stats.antrea.tanzu.vmware.com401 - v1beta1.system.antrea.tanzu.vmware.com402 - v1beta2.controlplane.antrea.tanzu.vmware.com403 - v1beta1.controlplane.antrea.tanzu.vmware.com404 - v1beta1.networking.antrea.tanzu.vmware.com405 resources:406 - apiservices407 verbs:408 - get409 - update410- 
apiGroups:411 - admissionregistration.k8s.io412 resourceNames:413 - crdmutator.antrea.tanzu.vmware.com414 - crdvalidator.antrea.tanzu.vmware.com415 resources:416 - mutatingwebhookconfigurations417 - validatingwebhookconfigurations418 verbs:419 - get420 - update421- apiGroups:422 - security.antrea.tanzu.vmware.com423 resources:424 - clusternetworkpolicies425 - networkpolicies426 verbs:427 - get428 - watch429 - list430- apiGroups:431 - security.antrea.tanzu.vmware.com432 resources:433 - clusternetworkpolicies/status434 - networkpolicies/status435 verbs:436 - update437- apiGroups:438 - security.antrea.tanzu.vmware.com439 resources:440 - tiers441 verbs:442 - get443 - watch444 - list445 - create446 - update447- apiGroups:448 - ops.antrea.tanzu.vmware.com449 resources:450 - traceflows451 - traceflows/status452 verbs:453 - get454 - watch455 - list456 - update457 - patch458 - create459 - delete460- apiGroups:461 - core.antrea.tanzu.vmware.com462 resources:463 - externalentities464 - clustergroups465 verbs:466 - get467 - watch468 - list469- apiGroups:470 - core.antrea.tanzu.vmware.com471 resources:472 - clustergroups/status473 verbs:474 - update475`,476`apiVersion: rbac.authorization.k8s.io/v1477kind: ClusterRoleBinding478metadata:479 labels:480 app: antrea481 name: antctl482 namespace: kube-system483roleRef:484 apiGroup: rbac.authorization.k8s.io485 kind: ClusterRole486 name: antctl487subjects:488- kind: ServiceAccount489 name: antctl490 namespace: kube-system491`,492`apiVersion: rbac.authorization.k8s.io/v1493kind: ClusterRoleBinding494metadata:495 labels:496 app: antrea497 name: antrea-agent498roleRef:499 apiGroup: rbac.authorization.k8s.io500 kind: ClusterRole501 name: antrea-agent502subjects:503- kind: ServiceAccount504 name: antrea-agent505 namespace: kube-system506`,507`apiVersion: rbac.authorization.k8s.io/v1508kind: ClusterRoleBinding509metadata:510 labels:511 app: antrea512 name: antrea-controller513roleRef:514 apiGroup: rbac.authorization.k8s.io515 kind: ClusterRole516 name: antrea-controller517subjects:518- kind: ServiceAccount519 name: antrea-controller520 namespace: kube-system521`,522`apiVersion: v1523kind: ConfigMap524metadata:525 labels:526 app: antrea527 name: antrea-ca528 namespace: kube-system529`,530`apiVersion: v1531data:532 antrea-agent.conf: |533 # FeatureGates is a map of feature names to bools that enable or disable experimental features.534 featureGates:535 # Enable AntreaProxy which provides ServiceLB for in-cluster Services in antrea-agent.536 # It should be enabled on Windows, otherwise NetworkPolicy will not take effect on537 # Service traffic.538 # AntreaProxy: true539 # Enable EndpointSlice support in AntreaProxy. Don't enable this feature unless that EndpointSlice540 # API version v1beta1 is supported and set as enabled in Kubernetes. 
If AntreaProxy is not enabled,541 # this flag will not take effect.542 # EndpointSlice: false543 # Enable traceflow which provides packet tracing feature to diagnose network issue.544 # Traceflow: true545 # Enable NodePortLocal feature to make the pods reachable externally through NodePort546 # NodePortLocal: false547 # Enable Antrea ClusterNetworkPolicy feature to complement K8s NetworkPolicy for cluster admins548 # to define security policies which apply to the entire cluster, and Antrea NetworkPolicy549 # feature that supports priorities, rule actions and externalEntities in the future.550 # AntreaPolicy: false551 # Enable flowexporter which exports polled conntrack connections as IPFIX flow records from each552 # agent to a configured collector.553 # FlowExporter: false554 # Enable collecting and exposing NetworkPolicy statistics.555 # NetworkPolicyStats: false556 # Name of the OpenVSwitch bridge antrea-agent will create and use.557 # Make sure it doesn't conflict with your existing OpenVSwitch bridges.558 #ovsBridge: br-int559 # Datapath type to use for the OpenVSwitch bridge created by Antrea. Supported values are:560 # - system561 # - netdev562 # 'system' is the default value and corresponds to the kernel datapath. Use 'netdev' to run563 # OVS in userspace mode. Userspace mode requires the tun device driver to be available.564 #ovsDatapathType: system565 # Name of the interface antrea-agent will create and use for host <--> pod communication.566 # Make sure it doesn't conflict with your existing interfaces.567 #hostGateway: antrea-gw0568 # Determines how traffic is encapsulated. It has the following options:569 # encap(default): Inter-node Pod traffic is always encapsulated and Pod to external network570 # traffic is SNAT'd.571 # noEncap: Inter-node Pod traffic is not encapsulated; Pod to external network traffic is572 # SNAT'd if noSNAT is not set to true. Underlying network must be capable of573 # supporting Pod traffic across IP subnets.574 # hybrid: noEncap if source and destination Nodes are on the same subnet, otherwise encap.575 # networkPolicyOnly: Antrea enforces NetworkPolicy only, and utilizes CNI chaining and delegates Pod576 # IPAM and connectivity to the primary CNI.577 #578 #trafficEncapMode: encap579 # Whether or not to SNAT (using the Node IP) the egress traffic from a Pod to the external network.580 # This option is for the noEncap traffic mode only, and the default value is false. In the noEncap581 # mode, if the cluster's Pod CIDR is reachable from the external network, then the Pod traffic to582 # the external network needs not be SNAT'd. In the networkPolicyOnly mode, antrea-agent never583 # performs SNAT and this option will be ignored; for other modes it must be set to false.584 #noSNAT: false585 # Tunnel protocols used for encapsulating traffic across Nodes. Supported values:586 # - geneve (default)587 # - vxlan588 # - gre589 # - stt590 #tunnelType: geneve591 # Default MTU to use for the host gateway interface and the network interface of each Pod.592 # If omitted, antrea-agent will discover the MTU of the Node's primary interface and593 # also adjust MTU to accommodate for tunnel encapsulation overhead (if applicable).594 #defaultMTU: 1450595 # Whether or not to enable IPsec encryption of tunnel traffic. IPsec encryption is only supported596 # for the GRE tunnel type.597 #enableIPSecTunnel: false598 # ClusterIP CIDR range for Services. 
It's required when AntreaProxy is not enabled, and should be599 # set to the same value as the one specified by --service-cluster-ip-range for kube-apiserver. When600 # AntreaProxy is enabled, this parameter is not needed and will be ignored if provided.601 #serviceCIDR: 10.96.0.0/12602 # ClusterIP CIDR range for IPv6 Services. It's required when using kube-proxy to provide IPv6 Service in a Dual-Stack603 # cluster or an IPv6 only cluster. The value should be the same as the configuration for kube-apiserver specified by604 # --service-cluster-ip-range. When AntreaProxy is enabled, this parameter is not needed.605 # No default value for this field.606 #serviceCIDRv6:607 # The port for the antrea-agent APIServer to serve on.608 # Note that if it's set to another value, the containerPort of the api port of the609 # antrea-agent container must be set to the same value.610 #apiPort: 10350611 # Enable metrics exposure via Prometheus. Initializes Prometheus metrics listener.612 #enablePrometheusMetrics: true613 # Provide the IPFIX collector address as a string with format <HOST>:[<PORT>][:<PROTO>].614 # HOST can either be the DNS name or the IP of the Flow Collector. For example,615 # "flow-aggregator.flow-aggregator.svc" can be provided as DNS name to connect616 # to the Antrea Flow Aggregator service. If IP, it can be either IPv4 or IPv6.617 # However, IPv6 address should be wrapped with [].618 # If PORT is empty, we default to 4739, the standard IPFIX port.619 # If no PROTO is given, we consider "tcp" as default. We support "tcp" and "udp"620 # L4 transport protocols.621 #flowCollectorAddr: "flow-aggregator.flow-aggregator.svc:4739:tcp"622 # Provide flow poll interval as a duration string. This determines how often the flow exporter dumps connections from the conntrack module.623 # Flow poll interval should be greater than or equal to 1s (one second).624 # Valid time units are "ns", "us" (or "μs"), "ms", "s", "m", "h".625 #flowPollInterval: "5s"626 # Provide flow export frequency, which is the number of poll cycles elapsed before flow exporter exports flow records to627 # the flow collector.628 # Flow export frequency should be greater than or equal to 1.629 #flowExportFrequency: 12630 # Enable TLS communication from flow exporter to flow aggregator.631 #enableTLSToFlowAggregator: true632 # Provide the port range used by NodePortLocal. When the NodePortLocal feature is enabled, a port from that range will be assigned633 # whenever a Pod's container defines a specific port to be exposed (each container can define a list of ports as pod.spec.containers[].ports),634 # and all Node traffic directed to that port will be forwarded to the Pod.635 #nplPortRange: 40000-41000636 # Provide the address of Kubernetes apiserver, to override any value provided in kubeconfig or InClusterConfig.637 # Defaults to "". It must be a host string, a host:port pair, or a URL to the base of the apiserver.638 #kubeAPIServerOverride: ""639 # Comma-separated list of Cipher Suites. If omitted, the default Go Cipher Suites will be used.640 # https://golang.org/pkg/crypto/tls/#pkg-constants641 # Note that TLS1.3 Cipher Suites cannot be added to the list. 
But the apiserver will always642 # prefer TLS1.3 Cipher Suites whenever possible.643 #tlsCipherSuites:644 # TLS min version from: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.645 #tlsMinVersion:646 antrea-cni.conflist: |647 {648 "cniVersion":"0.3.0",649 "name": "antrea",650 "plugins": [651 {652 "type": "antrea",653 "ipam": {654 "type": "host-local"655 }656 },657 {658 "type": "portmap",659 "capabilities": {"portMappings": true}660 },661 {662 "type": "bandwidth",663 "capabilities": {"bandwidth": true}664 }665 ]666 }667 antrea-controller.conf: |668 # FeatureGates is a map of feature names to bools that enable or disable experimental features.669 featureGates:670 # Enable traceflow which provides packet tracing feature to diagnose network issue.671 # Traceflow: true672 # Enable Antrea ClusterNetworkPolicy feature to complement K8s NetworkPolicy for cluster admins673 # to define security policies which apply to the entire cluster, and Antrea NetworkPolicy674 # feature that supports priorities, rule actions and externalEntities in the future.675 # AntreaPolicy: false676 # Enable collecting and exposing NetworkPolicy statistics.677 # NetworkPolicyStats: false678 # The port for the antrea-controller APIServer to serve on.679 # Note that if it's set to another value, the containerPort of the api port of the680 # antrea-controller container must be set to the same value.681 #apiPort: 10349682 # Enable metrics exposure via Prometheus. Initializes Prometheus metrics listener.683 #enablePrometheusMetrics: true684 # Indicates whether to use auto-generated self-signed TLS certificate.685 # If false, A Secret named "antrea-controller-tls" must be provided with the following keys:686 # ca.crt: <CA certificate>687 # tls.crt: <TLS certificate>688 # tls.key: <TLS private key>689 # And the Secret must be mounted to directory "/var/run/antrea/antrea-controller-tls" of the690 # antrea-controller container.691 #selfSignedCert: true692 # Comma-separated list of Cipher Suites. If omitted, the default Go Cipher Suites will be used.693 # https://golang.org/pkg/crypto/tls/#pkg-constants694 # Note that TLS1.3 Cipher Suites cannot be added to the list. 
But the apiserver will always695 # prefer TLS1.3 Cipher Suites whenever possible.696 #tlsCipherSuites:697 # TLS min version from: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.698 #tlsMinVersion:699kind: ConfigMap700metadata:701 annotations: {}702 labels:703 app: antrea704 name: antrea-config-md64tc85t9705 namespace: kube-system706`,707`apiVersion: v1708kind: Service709metadata:710 labels:711 app: antrea712 name: antrea713 namespace: kube-system714spec:715 ports:716 - port: 443717 protocol: TCP718 targetPort: api719 selector:720 app: antrea721 component: antrea-controller722`,723`apiVersion: apps/v1724kind: Deployment725metadata:726 labels:727 app: antrea728 component: antrea-controller729 name: antrea-controller730 namespace: kube-system731spec:732 replicas: 1733 selector:734 matchLabels:735 app: antrea736 component: antrea-controller737 strategy:738 type: Recreate739 template:740 metadata:741 labels:742 app: antrea743 component: antrea-controller744 spec:745 containers:746 - args:747 - --config748 - /etc/antrea/antrea-controller.conf749 - --logtostderr=false750 - --log_dir=/var/log/antrea751 - --alsologtostderr752 - --log_file_max_size=100753 - --log_file_max_num=4754 - --v=0755 command:756 - antrea-controller757 env:758 - name: POD_NAME759 valueFrom:760 fieldRef:761 fieldPath: metadata.name762 - name: POD_NAMESPACE763 valueFrom:764 fieldRef:765 fieldPath: metadata.namespace766 - name: NODE_NAME767 valueFrom:768 fieldRef:769 fieldPath: spec.nodeName770 - name: SERVICEACCOUNT_NAME771 valueFrom:772 fieldRef:773 fieldPath: spec.serviceAccountName774 image: k8s.gcr.io.ics.com/antrea/antrea-ubuntu:v0.13.1775 livenessProbe:776 failureThreshold: 5777 httpGet:778 host: 127.0.0.1779 path: /livez780 port: api781 scheme: HTTPS782 periodSeconds: 10783 timeoutSeconds: 5784 name: antrea-controller785 ports:786 - containerPort: 10349787 name: api788 protocol: TCP789 readinessProbe:790 failureThreshold: 5791 httpGet:792 host: 127.0.0.1793 path: /readyz794 port: api795 scheme: HTTPS796 initialDelaySeconds: 5797 periodSeconds: 10798 timeoutSeconds: 5799 resources:800 requests:801 cpu: 200m802 volumeMounts:803 - mountPath: /etc/antrea/antrea-controller.conf804 name: antrea-config805 readOnly: true806 subPath: antrea-controller.conf807 - mountPath: /var/run/antrea/antrea-controller-tls808 name: antrea-controller-tls809 - mountPath: /var/log/antrea810 name: host-var-log-antrea811 hostNetwork: true812 nodeSelector:813 kubernetes.io/os: linux814 priorityClassName: system-cluster-critical815 serviceAccountName: antrea-controller816 tolerations:817 - key: CriticalAddonsOnly818 operator: Exists819 - effect: NoSchedule820 key: node-role.kubernetes.io/master821 volumes:822 - configMap:823 name: antrea-config-md64tc85t9824 name: antrea-config825 - name: antrea-controller-tls826 secret:827 defaultMode: 256828 optional: true829 secretName: antrea-controller-tls830 - hostPath:831 path: /var/log/antrea832 type: DirectoryOrCreate833 name: host-var-log-antrea834`,835`apiVersion: apiregistration.k8s.io/v1836kind: APIService837metadata:838 labels:839 app: antrea840 name: v1alpha1.stats.antrea.tanzu.vmware.com841spec:842 group: stats.antrea.tanzu.vmware.com843 groupPriorityMinimum: 100844 service:845 name: antrea846 namespace: kube-system847 version: v1alpha1848 versionPriority: 100849`,850`apiVersion: apiregistration.k8s.io/v1851kind: APIService852metadata:853 labels:854 app: antrea855 name: v1beta1.controlplane.antrea.tanzu.vmware.com856spec:857 group: controlplane.antrea.tanzu.vmware.com858 groupPriorityMinimum: 
100859 service:860 name: antrea861 namespace: kube-system862 version: v1beta1863 versionPriority: 100864`,865`apiVersion: apiregistration.k8s.io/v1866kind: APIService867metadata:868 labels:869 app: antrea870 name: v1beta1.networking.antrea.tanzu.vmware.com871spec:872 group: networking.antrea.tanzu.vmware.com873 groupPriorityMinimum: 100874 service:875 name: antrea876 namespace: kube-system877 version: v1beta1878 versionPriority: 100879`,880`apiVersion: apiregistration.k8s.io/v1881kind: APIService882metadata:883 labels:884 app: antrea885 name: v1beta1.system.antrea.tanzu.vmware.com886spec:887 group: system.antrea.tanzu.vmware.com888 groupPriorityMinimum: 100889 service:890 name: antrea891 namespace: kube-system892 version: v1beta1893 versionPriority: 100894`,895`apiVersion: apiregistration.k8s.io/v1896kind: APIService897metadata:898 labels:899 app: antrea900 name: v1beta2.controlplane.antrea.tanzu.vmware.com901spec:902 group: controlplane.antrea.tanzu.vmware.com903 groupPriorityMinimum: 100904 service:905 name: antrea906 namespace: kube-system907 version: v1beta2908 versionPriority: 100909`,910`apiVersion: apps/v1911kind: DaemonSet912metadata:913 labels:914 app: antrea915 component: antrea-agent916 name: antrea-agent917 namespace: kube-system918spec:919 selector:920 matchLabels:921 app: antrea922 component: antrea-agent923 template:924 metadata:925 labels:926 app: antrea927 component: antrea-agent928 spec:929 containers:930 - args:931 - --config932 - /etc/antrea/antrea-agent.conf933 - --logtostderr=false934 - --log_dir=/var/log/antrea935 - --alsologtostderr936 - --log_file_max_size=100937 - --log_file_max_num=4938 - --v=0939 command:940 - antrea-agent941 env:942 - name: POD_NAME943 valueFrom:944 fieldRef:945 fieldPath: metadata.name946 - name: POD_NAMESPACE947 valueFrom:948 fieldRef:949 fieldPath: metadata.namespace950 - name: NODE_NAME951 valueFrom:952 fieldRef:953 fieldPath: spec.nodeName954 image: k8s.gcr.io.ics.com/antrea/antrea-ubuntu:v0.13.1955 livenessProbe:956 exec:957 command:958 - /bin/sh959 - -c960 - container_liveness_probe agent961 failureThreshold: 5962 initialDelaySeconds: 5963 periodSeconds: 10964 timeoutSeconds: 5965 name: antrea-agent966 ports:967 - containerPort: 10350968 name: api969 protocol: TCP970 readinessProbe:971 failureThreshold: 5972 httpGet:973 host: 127.0.0.1974 path: /readyz975 port: api976 scheme: HTTPS977 initialDelaySeconds: 5978 periodSeconds: 10979 timeoutSeconds: 5980 resources:981 requests:982 cpu: 200m983 securityContext:984 privileged: true985 volumeMounts:986 - mountPath: /etc/antrea/antrea-agent.conf987 name: antrea-config988 readOnly: true989 subPath: antrea-agent.conf990 - mountPath: /var/run/antrea991 name: host-var-run-antrea992 - mountPath: /var/run/openvswitch993 name: host-var-run-antrea994 subPath: openvswitch995 - mountPath: /var/lib/cni996 name: host-var-run-antrea997 subPath: cni998 - mountPath: /var/log/antrea999 name: host-var-log-antrea1000 - mountPath: /host/proc1001 name: host-proc1002 readOnly: true1003 - mountPath: /host/var/run/netns1004 mountPropagation: HostToContainer1005 name: host-var-run-netns1006 readOnly: true1007 - mountPath: /run/xtables.lock1008 name: xtables-lock1009 - args:1010 - --log_file_max_size=1001011 - --log_file_max_num=41012 command:1013 - start_ovs1014 image: k8s.gcr.io.ics.com/antrea/antrea-ubuntu:v0.13.11015 livenessProbe:1016 exec:1017 command:1018 - /bin/sh1019 - -c1020 - timeout 10 container_liveness_probe ovs1021 failureThreshold: 51022 initialDelaySeconds: 51023 periodSeconds: 101024 timeoutSeconds: 
101025 name: antrea-ovs1026 resources:1027 requests:1028 cpu: 200m1029 securityContext:1030 capabilities:1031 add:1032 - SYS_NICE1033 - NET_ADMIN1034 - SYS_ADMIN1035 - IPC_LOCK1036 volumeMounts:1037 - mountPath: /var/run/openvswitch1038 name: host-var-run-antrea1039 subPath: openvswitch1040 - mountPath: /var/log/openvswitch1041 name: host-var-log-antrea1042 subPath: openvswitch1043 dnsPolicy: ClusterFirstWithHostNet1044 hostNetwork: true1045 initContainers:1046 - command:1047 - install_cni1048 image: k8s.gcr.io.ics.com/antrea/antrea-ubuntu:v0.13.11049 name: install-cni1050 resources:1051 requests:1052 cpu: 100m1053 securityContext:1054 capabilities:1055 add:1056 - SYS_MODULE1057 volumeMounts:1058 - mountPath: /etc/antrea/antrea-cni.conflist1059 name: antrea-config1060 readOnly: true1061 subPath: antrea-cni.conflist1062 - mountPath: /host/etc/cni/net.d1063 name: host-cni-conf1064 - mountPath: /host/opt/cni/bin1065 name: host-cni-bin1066 - mountPath: /lib/modules1067 name: host-lib-modules1068 readOnly: true1069 - mountPath: /var/run/antrea1070 name: host-var-run-antrea1071 nodeSelector:1072 kubernetes.io/os: linux1073 priorityClassName: system-node-critical1074 serviceAccountName: antrea-agent1075 tolerations:1076 - key: CriticalAddonsOnly1077 operator: Exists1078 - effect: NoSchedule1079 operator: Exists1080 - effect: NoExecute1081 operator: Exists1082 volumes:1083 - configMap:1084 name: antrea-config-md64tc85t91085 name: antrea-config1086 - hostPath:1087 path: /etc/cni/net.d1088 name: host-cni-conf1089 - hostPath:1090 path: /opt/cni/bin1091 name: host-cni-bin1092 - hostPath:1093 path: /proc1094 name: host-proc1095 - hostPath:1096 path: /var/run/netns1097 name: host-var-run-netns1098 - hostPath:1099 path: /var/run/antrea1100 type: DirectoryOrCreate1101 name: host-var-run-antrea1102 - hostPath:1103 path: /var/log/antrea1104 type: DirectoryOrCreate1105 name: host-var-log-antrea1106 - hostPath:1107 path: /lib/modules1108 name: host-lib-modules1109 - hostPath:1110 path: /run/xtables.lock1111 type: FileOrCreate1112 name: xtables-lock1113 updateStrategy:1114 type: RollingUpdate1115`,1116`apiVersion: admissionregistration.k8s.io/v11117kind: MutatingWebhookConfiguration1118metadata:1119 labels:1120 app: antrea1121 name: crdmutator.antrea.tanzu.vmware.com1122webhooks:1123- admissionReviewVersions:1124 - v11125 - v1beta11126 clientConfig:1127 service:1128 name: antrea1129 namespace: kube-system1130 path: /mutate/acnp1131 name: acnpmutator.antrea.tanzu.vmware.com1132 rules:1133 - apiGroups:1134 - security.antrea.tanzu.vmware.com1135 apiVersions:1136 - v1alpha11137 operations:1138 - CREATE1139 - UPDATE1140 resources:1141 - clusternetworkpolicies1142 scope: Cluster1143 sideEffects: None1144 timeoutSeconds: 51145- admissionReviewVersions:1146 - v11147 - v1beta11148 clientConfig:1149 service:1150 name: antrea1151 namespace: kube-system1152 path: /mutate/anp1153 name: anpmutator.antrea.tanzu.vmware.com1154 rules:1155 - apiGroups:1156 - security.antrea.tanzu.vmware.com1157 apiVersions:1158 - v1alpha11159 operations:1160 - CREATE1161 - UPDATE1162 resources:1163 - networkpolicies1164 scope: Namespaced1165 sideEffects: None1166 timeoutSeconds: 51167`,1168`apiVersion: admissionregistration.k8s.io/v11169kind: ValidatingWebhookConfiguration1170metadata:1171 labels:1172 app: antrea1173 name: crdvalidator.antrea.tanzu.vmware.com1174webhooks:1175- admissionReviewVersions:1176 - v11177 - v1beta11178 clientConfig:1179 service:1180 name: antrea1181 namespace: kube-system1182 path: /validate/tier1183 name: 
tiervalidator.antrea.tanzu.vmware.com1184 rules:1185 - apiGroups:1186 - security.antrea.tanzu.vmware.com1187 apiVersions:1188 - v1alpha11189 operations:1190 - CREATE1191 - UPDATE1192 - DELETE1193 resources:1194 - tiers1195 scope: Cluster1196 sideEffects: None1197 timeoutSeconds: 51198- admissionReviewVersions:1199 - v11200 - v1beta11201 clientConfig:1202 service:1203 name: antrea1204 namespace: kube-system1205 path: /validate/acnp1206 name: acnpvalidator.antrea.tanzu.vmware.com1207 rules:1208 - apiGroups:1209 - security.antrea.tanzu.vmware.com1210 apiVersions:1211 - v1alpha11212 operations:1213 - CREATE1214 - UPDATE1215 resources:1216 - clusternetworkpolicies1217 scope: Cluster1218 sideEffects: None1219 timeoutSeconds: 51220- admissionReviewVersions:1221 - v11222 - v1beta11223 clientConfig:1224 service:1225 name: antrea1226 namespace: kube-system1227 path: /validate/anp1228 name: anpvalidator.antrea.tanzu.vmware.com1229 rules:1230 - apiGroups:1231 - security.antrea.tanzu.vmware.com1232 apiVersions:1233 - v1alpha11234 operations:1235 - CREATE1236 - UPDATE1237 resources:1238 - networkpolicies1239 scope: Namespaced1240 sideEffects: None1241 timeoutSeconds: 51242- admissionReviewVersions:1243 - v11244 - v1beta11245 clientConfig:1246 service:1247 name: antrea1248 namespace: kube-system1249 path: /validate/clustergroup1250 name: clustergroupvalidator.antrea.tanzu.vmware.com1251 rules:1252 - apiGroups:1253 - core.antrea.tanzu.vmware.com1254 apiVersions:1255 - v1alpha21256 operations:1257 - CREATE1258 - UPDATE1259 - DELETE1260 resources:1261 - clustergroups1262 scope: Cluster1263 sideEffects: None1264 timeoutSeconds: 51265`,1266}...
attach_test.go
Source:attach_test.go
1// Copyright 2016 VMware, Inc. All Rights Reserved.2//3// Licensed under the Apache License, Version 2.0 (the "License");4// you may not use this file except in compliance with the License.5// You may obtain a copy of the License at6//7// http://www.apache.org/licenses/LICENSE-2.08//9// Unless required by applicable law or agreed to in writing, software10// distributed under the License is distributed on an "AS IS" BASIS,11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.12// See the License for the specific language governing permissions and13// limitations under the License.14package main15import (16 "bytes"17 "context"18 "crypto/rand"19 "crypto/rsa"20 "crypto/x509"21 "encoding/pem"22 "errors"23 "fmt"24 "io"25 "net"26 "os"27 "path"28 "sync"29 "syscall"30 "testing"31 "time"32 log "github.com/sirupsen/logrus"33 "github.com/stretchr/testify/assert"34 "golang.org/x/crypto/ssh"35 "github.com/vmware/vic/lib/config/executor"36 "github.com/vmware/vic/lib/portlayer/attach/communication"37 "github.com/vmware/vic/lib/migration/feature"38 "github.com/vmware/vic/lib/tether"39 "github.com/vmware/vic/pkg/serial"40)41type testAttachServer struct {42 attachServerSSH43 enabled bool44 updated chan bool45}46func (t *testAttachServer) start() error {47 t.testing = true48 err := t.attachServerSSH.start()49 if err == nil {50 t.updated <- true51 t.enabled = true52 }53 log.Info("Started test attach server")54 return err55}56func (t *testAttachServer) stop() error {57 if t.enabled {58 err := t.attachServerSSH.stop()59 if err == nil {60 log.Info("Stopped test attach server")61 t.updated <- true62 t.enabled = false63 }64 return err65 }66 return nil67}68func (t *testAttachServer) Reload(config *tether.ExecutorConfig) error {69 log.Info("Parsing config in test attach server")70 return t.attachServerSSH.Reload(config)71}72func (t *testAttachServer) Start(sys tether.System) error {73 log.Info("opening ttyS0 pipe pair for backchannel (server)")74 c, err := os.OpenFile(path.Join(pathPrefix, "ttyS0c"), os.O_WRONLY|syscall.O_NOCTTY, 0777)75 if err != nil {76 detail := fmt.Sprintf("failed to open cpipe for backchannel: %s", err)77 log.Error(detail)78 return errors.New(detail)79 }80 s, err := os.OpenFile(path.Join(pathPrefix, "ttyS0s"), os.O_RDONLY|syscall.O_NOCTTY, 0777)81 if err != nil {82 detail := fmt.Sprintf("failed to open spipe for backchannel: %s", err)83 log.Error(detail)84 return errors.New(detail)85 }86 log.Infof("creating raw connection from ttyS0 pipe pair for server (c=%d, s=%d) %s\n", c.Fd(), s.Fd(), pathPrefix)87 conn, err := serial.NewHalfDuplexFileConn(s, c, path.Join(pathPrefix, "ttyS0"), "file")88 if err != nil {89 detail := fmt.Sprintf("failed to create raw connection from ttyS0 pipe pair: %s", err)90 log.Error(detail)91 return errors.New(detail)92 }93 t.conn.Lock()94 defer t.conn.Unlock()95 t.conn.conn = conn96 return nil97}98func (t *testAttachServer) Stop() error {99 return t.attachServerSSH.Stop()100}101// create client on the mock pipe102func mockBackChannel(ctx context.Context) (net.Conn, error) {103 log.Info("opening ttyS0 pipe pair for backchannel (client)")104 c, err := os.OpenFile(path.Join(pathPrefix, "ttyS0c"), os.O_RDONLY|syscall.O_NOCTTY, 0777)105 if err != nil {106 detail := fmt.Sprintf("failed to open cpipe for backchannel: %s", err)107 log.Error(detail)108 return nil, errors.New(detail)109 }110 s, err := os.OpenFile(path.Join(pathPrefix, "ttyS0s"), os.O_WRONLY|syscall.O_NOCTTY, 0777)111 if err != nil {112 detail := fmt.Sprintf("failed to open spipe for 
backchannel: %s", err)113 log.Error(detail)114 return nil, errors.New(detail)115 }116 log.Infof("creating raw connection from ttyS0 pipe pair for backchannel (c=%d, s=%d) %s\n", c.Fd(), s.Fd(), pathPrefix)117 conn, err := serial.NewHalfDuplexFileConn(c, s, path.Join(pathPrefix, "ttyS0"), "file")118 if err != nil {119 detail := fmt.Sprintf("failed to create raw connection from ttyS0 pipe pair: %s", err)120 log.Error(detail)121 return nil, errors.New(detail)122 }123 // HACK: currently RawConn dosn't implement timeout so throttle the spinning124 ticker := time.NewTicker(1000 * time.Millisecond)125 for {126 select {127 case <-ticker.C:128 err := serial.HandshakeClient(conn)129 if err != nil {130 if err == io.EOF {131 // with unix pipes the open will block until both ends are open, therefore132 // EOF means the other end has been intentionally closed133 return nil, err134 }135 log.Error(err)136 } else {137 return conn, nil138 }139 case <-ctx.Done():140 conn.Close()141 ticker.Stop()142 return nil, ctx.Err()143 }144 }145}146// create client on the mock pipe and dial the given host:port147func mockNetworkToSerialConnection(host string) (*sync.WaitGroup, error) {148 log.Info("opening ttyS0 pipe pair for backchannel")149 c, err := os.OpenFile(path.Join(pathPrefix, "ttyS0c"), os.O_RDONLY|syscall.O_NOCTTY, 0777)150 if err != nil {151 return nil, fmt.Errorf("failed to open cpipe for backchannel: %s", err)152 }153 s, err := os.OpenFile(path.Join(pathPrefix, "ttyS0s"), os.O_WRONLY|syscall.O_NOCTTY, 0777)154 if err != nil {155 return nil, fmt.Errorf("failed to open spipe for backchannel: %s", err)156 }157 log.Infof("creating raw connection from ttyS0 pipe pair (c=%d, s=%d)\n", c.Fd(), s.Fd())158 conn, err := serial.NewHalfDuplexFileConn(c, s, path.Join(pathPrefix, "/ttyS0"), "file")159 if err != nil {160 return nil, fmt.Errorf("failed to create raw connection from ttyS0 pipe pair: %s", err)161 }162 // Dial the attach server. 
This is a TCP client163 networkClientCon, err := net.Dial("tcp", host)164 if err != nil {165 return nil, err166 }167 log.Debugf("dialed %s", host)168 wg := sync.WaitGroup{}169 wg.Add(2)170 go func() {171 io.Copy(networkClientCon, conn)172 wg.Done()173 }()174 go func() {175 io.Copy(conn, networkClientCon)176 wg.Done()177 }()178 return &wg, nil179}180func genKey() []byte {181 // generate a host key for the tether182 privateKey, err := rsa.GenerateKey(rand.Reader, 512)183 if err != nil {184 panic("unable to generate private key during test")185 }186 privateKeyDer := x509.MarshalPKCS1PrivateKey(privateKey)187 privateKeyBlock := pem.Block{188 Type: "RSA PRIVATE KEY",189 Headers: nil,190 Bytes: privateKeyDer,191 }192 return pem.EncodeToMemory(&privateKeyBlock)193}194func attachCase(t *testing.T, runblock bool) {195 mocker := testSetup(t)196 defer testTeardown(t, mocker)197 testServer, _ := server.(*testAttachServer)198 cfg := executor.ExecutorConfig{199 Diagnostics: executor.Diagnostics{200 DebugLevel: 1,201 },202 ExecutorConfigCommon: executor.ExecutorConfigCommon{203 ID: "attach",204 Name: "tether_test_executor",205 },206 Sessions: map[string]*executor.SessionConfig{207 "attach": {208 Common: executor.Common{209 ID: "attach",210 Name: "tether_test_session",211 },212 Tty: false,213 Attach: true,214 Active: true,215 OpenStdin: true,216 RunBlock: runblock,217 Cmd: executor.Cmd{218 Path: "/usr/bin/tee",219 // grep, matching everything, reading from stdin220 Args: []string{"/usr/bin/tee", pathPrefix + "/tee.out"},221 Env: []string{},222 Dir: "/",223 },224 },225 },226 Key: genKey(),227 }228 _, _, conn := StartAttachTether(t, &cfg, mocker)229 defer conn.Close()230 // wait for updates to occur231 <-testServer.updated232 if !testServer.enabled {233 t.Errorf("attach server was not enabled")234 }235 containerConfig := &ssh.ClientConfig{236 User: "daemon",237 HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {238 return nil239 },240 }241 // create the SSH client from the mocked connection242 sshConn, chans, reqs, err := ssh.NewClientConn(conn, "notappliable", containerConfig)243 assert.NoError(t, err)244 defer sshConn.Close()245 ssh.NewClient(sshConn, chans, reqs)246 _, err = communication.ContainerIDs(sshConn)247 version, err := communication.ContainerVersion(sshConn)248 assert.NoError(t, err)249 sshSession, err := communication.NewSSHInteraction(sshConn, cfg.ID, version)250 if runblock {251 sshSession.Unblock()252 }253 assert.NoError(t, err)254 stdout := sshSession.Stdout()255 // FIXME: the pipe pair are line buffered - how do I disable that so we256 // don't have odd hangs to diagnose when the trailing \n is missed257 testBytes := []byte("\x1b[32mhello world\x1b[39m!\n")258 // read from session into buffer259 buf := &bytes.Buffer{}260 done := make(chan bool)261 go func() { io.Copy(buf, stdout); done <- true }()262 // write something to echo263 log.Debug("sending test data")264 sshSession.Stdin().Write(testBytes)265 log.Debug("sent test data")266 // wait for the close to propagate267 sshSession.CloseStdin()268 <-done269 // sshSession.Close()270}271func TestAttach(t *testing.T) {272 attachCase(t, false)273}274func TestAttachBlock(t *testing.T) {275 attachCase(t, true)276}277//278/////////////////////////////////////////////////////////////////////////////////////279/////////////////////////////////////////////////////////////////////////////////////280// TestAttachTTYConfig sets up the config for attach testing281//282func TestAttachTTY(t *testing.T) {283 t.Skip("TTY test 
skipped - not sure how to test this correctly")284 mocker := testSetup(t)285 defer testTeardown(t, mocker)286 testServer, _ := server.(*testAttachServer)287 cfg := executor.ExecutorConfig{288 ExecutorConfigCommon: executor.ExecutorConfigCommon{289 ID: "attach",290 Name: "tether_test_executor",291 },292 Sessions: map[string]*executor.SessionConfig{293 "attach": {294 Common: executor.Common{295 ID: "attach",296 Name: "tether_test_session",297 },298 Tty: true,299 Attach: true,300 Active: true,301 OpenStdin: true,302 Cmd: executor.Cmd{303 Path: "/usr/bin/tee",304 // grep, matching everything, reading from stdin305 Args: []string{"/usr/bin/tee", pathPrefix + "/tee.out"},306 Env: []string{},307 Dir: "/",308 },309 },310 },311 Key: genKey(),312 }313 _, _, conn := StartAttachTether(t, &cfg, mocker)314 defer conn.Close()315 // wait for updates to occur316 <-testServer.updated317 if !testServer.enabled {318 t.Errorf("attach server was not enabled")319 }320 cconfig := &ssh.ClientConfig{321 User: "daemon",322 HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {323 return nil324 },325 }326 // create the SSH client327 sConn, chans, reqs, err := ssh.NewClientConn(conn, "notappliable", cconfig)328 assert.NoError(t, err)329 defer sConn.Close()330 client := ssh.NewClient(sConn, chans, reqs)331 session, err := communication.NewSSHInteraction(client, cfg.ID, feature.MaxPluginVersion-1)332 assert.NoError(t, err)333 stdout := session.Stdout()334 // FIXME: this is line buffered - how do I disable that so we don't have odd hangs to diagnose335 // when the trailing \n is missed336 testBytes := []byte("\x1b[32mhello world\x1b[39m!\n")337 // after tty translation the above string should result in the following338 refBytes := []byte("\x5e[[32mhello world\x5e[[39m!\n")339 // read from session into buffer340 buf := &bytes.Buffer{}341 var wg sync.WaitGroup342 wg.Add(1)343 go func() {344 io.CopyN(buf, stdout, int64(len(refBytes)))345 wg.Done()346 }()347 // write something to echo348 log.Debug("sending test data")349 session.Stdin().Write(testBytes)350 log.Debug("sent test data")351 // wait for the close to propagate352 wg.Wait()353 session.CloseStdin()354 assert.Equal(t, refBytes, buf.Bytes())355}356//357/////////////////////////////////////////////////////////////////////////////////////358/////////////////////////////////////////////////////////////////////////////////////359// TestAttachTTYStdinClose sets up the config for attach testing360//361func TestAttachTTYStdinClose(t *testing.T) {362 mocker := testSetup(t)363 defer testTeardown(t, mocker)364 testServer, _ := server.(*testAttachServer)365 cfg := executor.ExecutorConfig{366 ExecutorConfigCommon: executor.ExecutorConfigCommon{367 ID: "sort",368 Name: "tether_test_executor",369 },370 Diagnostics: executor.Diagnostics{371 DebugLevel: 1,372 },373 Sessions: map[string]*executor.SessionConfig{374 "sort": {375 Common: executor.Common{376 ID: "sort",377 Name: "tether_test_session",378 },379 Tty: true,380 Attach: true,381 Active: true,382 OpenStdin: true,383 RunBlock: true,384 Cmd: executor.Cmd{385 Path: "/usr/bin/sort",386 // reading from stdin387 Args: []string{"/usr/bin/sort"},388 Env: []string{},389 Dir: "/",390 },391 },392 },393 Key: genKey(),394 }395 _, _, conn := StartAttachTether(t, &cfg, mocker)396 defer conn.Close()397 // wait for updates to occur398 <-testServer.updated399 if !testServer.enabled {400 t.Errorf("attach server was not enabled")401 }402 containerConfig := &ssh.ClientConfig{403 User: "daemon",404 HostKeyCallback: 
func(hostname string, remote net.Addr, key ssh.PublicKey) error {405 return nil406 },407 }408 // create the SSH client from the mocked connection409 sshConn, chans, reqs, err := ssh.NewClientConn(conn, "notappliable", containerConfig)410 assert.NoError(t, err)411 defer sshConn.Close()412 ssh.NewClient(sshConn, chans, reqs)413 _, err = communication.ContainerIDs(sshConn)414 assert.NoError(t, err)415 sshSession, err := communication.NewSSHInteraction(sshConn, cfg.ID, feature.MaxPluginVersion-1)416 assert.NoError(t, err)417 // unblock before grabbing stdout - this should buffer in ssh418 sshSession.Unblock()419 stdout := sshSession.Stdout()420 // FIXME: the pipe pair are line buffered - how do I disable that so we421 // don't have odd hangs to diagnose when the trailing \n is missed422 testBytes := []byte("one\ntwo\nthree\n")423 // after tty translation by sort the above string should result in the following424 // - we have echo turned on so we get a repeat of the initial string425 // - all \n bytes are translated to \r\n426 refBytes := []byte("one\r\ntwo\r\nthree\r\none\r\nthree\r\ntwo\r\n")427 // read from session into buffer428 buf := &bytes.Buffer{}429 done := make(chan bool)430 go func() {431 io.Copy(buf, stdout)432 log.Debug("stdout copy complete")433 done <- true434 }()435 // write something to echo436 log.Debug("sending test data")437 sshSession.Stdin().Write(testBytes)438 log.Debug("sent test data")439 // wait for the close to propagate440 sshSession.CloseStdin()441 <-done442 assert.Equal(t, refBytes, buf.Bytes())443}444//445/////////////////////////////////////////////////////////////////////////////////////446/////////////////////////////////////////////////////////////////////////////////////447// TestEcho ensures we get back data without a tty and without any stdin interaction448//449func TestEcho(t *testing.T) {450 mocker := testSetup(t)451 defer testTeardown(t, mocker)452 testServer, _ := server.(*testAttachServer)453 cfg := executor.ExecutorConfig{454 ExecutorConfigCommon: executor.ExecutorConfigCommon{455 ID: "echo",456 Name: "tether_test_executor",457 },458 Diagnostics: executor.Diagnostics{459 DebugLevel: 1,460 },461 Sessions: map[string]*executor.SessionConfig{462 "echo": {463 Common: executor.Common{464 ID: "echo",465 Name: "tether_test_session",466 },467 Tty: false,468 Attach: true,469 Active: true,470 OpenStdin: true,471 RunBlock: true,472 Cmd: executor.Cmd{473 Path: "/bin/echo",474 // reading from stdin475 Args: []string{"/bin/echo", "hello"},476 Env: []string{},477 Dir: "/",478 },479 },480 },481 Key: genKey(),482 }483 _, _, conn := StartAttachTether(t, &cfg, mocker)484 defer conn.Close()485 // wait for updates to occur486 <-testServer.updated487 if !testServer.enabled {488 t.Errorf("attach server was not enabled")489 }490 containerConfig := &ssh.ClientConfig{491 User: "daemon",492 HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {493 return nil494 },495 }496 // create the SSH client from the mocked connection497 sshConn, chans, reqs, err := ssh.NewClientConn(conn, "notappliable", containerConfig)498 assert.NoError(t, err)499 defer sshConn.Close()500 ssh.NewClient(sshConn, chans, reqs)501 _, err = communication.ContainerIDs(sshConn)502 assert.NoError(t, err)503 version, err := communication.ContainerVersion(sshConn)504 assert.NoError(t, err)505 sshSession, err := communication.NewSSHInteraction(sshConn, cfg.ID, version)506 assert.NoError(t, err)507 // unblock before grabbing stdout - this should buffer in ssh508 sshSession.Unblock()509 
stdout := sshSession.Stdout()510 stderr := sshSession.Stderr()511 doneStdout := make(chan bool)512 doneStderr := make(chan bool)513 // read from session into buffer514 bufout := &bytes.Buffer{}515 go func() {516 io.Copy(bufout, stdout)517 log.Debug("stdout copy complete")518 doneStdout <- true519 }()520 // read from session into buffer521 buferr := &bytes.Buffer{}522 go func() {523 io.Copy(buferr, stderr)524 log.Debug("stderr copy complete")525 doneStderr <- true526 }()527 // wait for the close to propagate528 <-doneStdout529 assert.Equal(t, "hello\n", string(bufout.Bytes()))530 <-doneStderr531 assert.Equal(t, "", string(buferr.Bytes()))532}533func TestEchoRepeat(t *testing.T) {534 log.SetLevel(log.WarnLevel)535 for i := 0; i < 10 && !t.Failed(); i++ {536 TestEcho(t)537 }538 defer log.SetLevel(log.DebugLevel)539}540//541/////////////////////////////////////////////////////////////////////////////////////542/////////////////////////////////////////////////////////////////////////////////////543// TestAttachMultiple sets up the config for attach testing - tests launching and544// attaching to multiple processes simultaneously545//546func TestAttachMultiple(t *testing.T) {547 mocker := testSetup(t)548 defer testTeardown(t, mocker)549 testServer, _ := server.(*testAttachServer)550 cfg := executor.ExecutorConfig{551 ExecutorConfigCommon: executor.ExecutorConfigCommon{552 ID: "tee1",553 Name: "tether_test_executor",554 },555 Sessions: map[string]*executor.SessionConfig{556 "tee1": {557 Common: executor.Common{558 ID: "tee1",559 Name: "tether_test_session",560 },561 Tty: false,562 Attach: true,563 Active: true,564 OpenStdin: true,565 Cmd: executor.Cmd{566 Path: "/usr/bin/tee",567 // grep, matching everything, reading from stdin568 Args: []string{"/usr/bin/tee", pathPrefix + "/tee1.out"},569 Env: []string{},570 Dir: "/",571 },572 },573 "tee2": {574 Common: executor.Common{575 ID: "tee2",576 Name: "tether_test_session2",577 },578 Tty: false,579 Attach: true,580 Active: true,581 OpenStdin: true,582 Cmd: executor.Cmd{583 Path: "/usr/bin/tee",584 // grep, matching everything, reading from stdin585 Args: []string{"/usr/bin/tee", pathPrefix + "/tee2.out"},586 Env: []string{},587 Dir: "/",588 },589 },590 "tee3": {591 Common: executor.Common{592 ID: "tee3",593 Name: "tether_test_session2",594 },595 Tty: false,596 Attach: false,597 Active: true,598 OpenStdin: true,599 Cmd: executor.Cmd{600 Path: "/usr/bin/tee",601 // grep, matching everything, reading from stdin602 Args: []string{"/usr/bin/tee", pathPrefix + "/tee3.out"},603 Env: []string{},604 Dir: "/",605 },606 },607 },608 Key: genKey(),609 Diagnostics: executor.Diagnostics{610 DebugLevel: 1,611 },612 }613 _, _, conn := StartAttachTether(t, &cfg, mocker)614 defer conn.Close()615 // wait for updates to occur616 <-mocker.Started617 <-testServer.updated618 if !testServer.enabled {619 t.Errorf("attach server was not enabled")620 }621 cconfig := &ssh.ClientConfig{622 User: "daemon",623 HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {624 return nil625 },626 }627 // create the SSH client628 sConn, chans, reqs, err := ssh.NewClientConn(conn, "notappliable", cconfig)629 assert.NoError(t, err)630 defer sConn.Close()631 client := ssh.NewClient(sConn, chans, reqs)632 ids, err := communication.ContainerIDs(client)633 assert.NoError(t, err)634 version, err := communication.ContainerVersion(client)635 assert.NoError(t, err)636 // there's no ordering guarantee in the returned ids637 if len(ids) != len(cfg.Sessions) {638 t.Errorf("ID 
list - expected %d, got %d", len(cfg.Sessions), len(ids))639 }640 // check the ids we got correspond to those in the config641 for _, id := range ids {642 if _, ok := cfg.Sessions[id]; !ok {643 t.Errorf("Expected sessions to have an entry for %s", id)644 }645 }646 sessionA, err := communication.NewSSHInteraction(client, "tee1", version)647 assert.NoError(t, err)648 sessionB, err := communication.NewSSHInteraction(client, "tee2", version)649 assert.NoError(t, err)650 stdoutA := sessionA.Stdout()651 stdoutB := sessionB.Stdout()652 // FIXME: this is line buffered - how do I disable that so we don't have odd hangs to diagnose653 // when the trailing \n is missed654 testBytesA := []byte("hello world!\n")655 testBytesB := []byte("goodbye world!\n")656 // read from session into buffer657 bufA := &bytes.Buffer{}658 bufB := &bytes.Buffer{}659 var wg sync.WaitGroup660 // wg.Add cannot go inside the go routines as the Add may not have happened by the time we call Wait661 wg.Add(2)662 go func() {663 io.CopyN(bufA, stdoutA, int64(len(testBytesA)))664 wg.Done()665 }()666 go func() {667 io.CopyN(bufB, stdoutB, int64(len(testBytesB)))668 wg.Done()669 }()670 // write something to echo671 log.Debug("sending test data")672 sessionA.Stdin().Write(testBytesA)673 sessionB.Stdin().Write(testBytesB)674 log.Debug("sent test data")675 // wait for the close to propagate676 wg.Wait()677 sessionA.CloseStdin()678 sessionB.CloseStdin()679 <-mocker.Cleaned680 assert.Equal(t, bufA.Bytes(), testBytesA)681 assert.Equal(t, bufB.Bytes(), testBytesB)682}683//684/////////////////////////////////////////////////////////////////////////////////////685/////////////////////////////////////////////////////////////////////////////////////686// TestAttachInvalid sets up the config for attach testing - launches a process but687// tries to attach to an invalid session id688//689func TestAttachInvalid(t *testing.T) {690 mocker := testSetup(t)691 defer testTeardown(t, mocker)692 testServer, _ := server.(*testAttachServer)693 cfg := executor.ExecutorConfig{694 ExecutorConfigCommon: executor.ExecutorConfigCommon{695 ID: "attachinvalid",696 Name: "tether_test_executor",697 },698 Sessions: map[string]*executor.SessionConfig{699 "valid": {700 Common: executor.Common{701 ID: "valid",702 Name: "tether_test_session",703 },704 Tty: false,705 Attach: true,706 Active: true,707 OpenStdin: true,708 Cmd: executor.Cmd{709 Path: "/usr/bin/tee",710 // grep, matching everything, reading from stdin711 Args: []string{"/usr/bin/tee", pathPrefix + "/tee.out"},712 Env: []string{},713 Dir: "/",714 },715 },716 },717 Key: genKey(),718 }719 tthr, _, conn := StartAttachTether(t, &cfg, mocker)720 defer conn.Close()721 // wait for updates to occur722 <-testServer.updated723 if !testServer.enabled {724 t.Errorf("attach server was not enabled")725 }726 cconfig := &ssh.ClientConfig{727 User: "daemon",728 HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {729 return nil730 },731 }732 // create the SSH client733 sConn, chans, reqs, err := ssh.NewClientConn(conn, "notappliable", cconfig)734 assert.NoError(t, err)735 defer sConn.Close()736 client := ssh.NewClient(sConn, chans, reqs)737 version, err := communication.ContainerVersion(client)738 assert.NoError(t, err)739 _, err = communication.NewSSHInteraction(client, "invalid", version)740 tthr.Stop()741 if err == nil {742 t.Errorf("Expected to fail on attempt to attach to invalid session")743 }744}745//746/////////////////////////////////////////////////////////////////////////////////////747// 
Start the tether, start a mock esx serial to tcp connection, start the748// attach server, try to Get() the tether's attached session.749/*750func TestMockAttachTetherToPL(t *testing.T) {751 testSetup(t)752 defer testTeardown(t)753 // Start the PL attach server754 testServer := communication.NewAttachServer("", 8080)755 assert.NoError(t, testServer.Start())756 defer testServer.Stop()757 cfg := executor.ExecutorConfig{758 Common: executor.Common{759 ID: "attach",760 Name: "tether_test_executor",761 },762 Sessions: map[string]*executor.SessionConfig{763 "attach": executor.SessionConfig{764 Common: executor.Common{765 ID: "attach",766 Name: "tether_test_session",767 },768 Tty: true,769 Attach: true,770 Cmd: executor.Cmd{771 Path: "/usr/bin/tee",772 // grep, matching everything, reading from stdin773 Args: []string{"/usr/bin/tee", pathPrefix + "/tee.out"},774 Env: []string{},775 Dir: "/",776 },777 },778 },779 Key: genKey(),780 }781 StartTether(t, &cfg)782 // create a conn on the mock pipe. Reads from pipe, echos to network.783 _, err := mockNetworkToSerialConnection(testServer.Addr())784 if !assert.NoError(t, err) {785 return786 }787 var pty communication.SessionInteractor788 pty, err = testServer.Get(context.Background(), "attach", 600*time.Second)789 if !assert.NoError(t, err) {790 return791 }792 err = pty.Resize(1, 2, 3, 4)793 if !assert.NoError(t, err) {794 return795 }796 if !assert.Equal(t, Mocked.WindowCol, uint32(1)) || !assert.Equal(t, Mocked.WindowRow, uint32(2)) {797 return798 }799 if err = pty.Signal("HUP"); !assert.NoError(t, err) {800 return801 }802 if !assert.Equal(t, Mocked.Signal, ssh.Signal("HUP")) {803 return804 }805}806*/807func TestReattach(t *testing.T) {808 mocker := testSetup(t)809 defer testTeardown(t, mocker)810 testServer, _ := server.(*testAttachServer)811 cfg := executor.ExecutorConfig{812 ExecutorConfigCommon: executor.ExecutorConfigCommon{813 ID: "attach",814 Name: "tether_test_executor",815 },816 Diagnostics: executor.Diagnostics{817 DebugLevel: 1,818 },819 Sessions: map[string]*executor.SessionConfig{820 "attach": {821 Common: executor.Common{822 ID: "attach",823 Name: "tether_test_session",824 },825 Tty: false,826 Attach: true,827 Active: true,828 RunBlock: true,829 OpenStdin: true,830 Cmd: executor.Cmd{831 Path: "/usr/bin/tee",832 // grep, matching everything, reading from stdin833 Args: []string{"/usr/bin/tee", pathPrefix + "/tee.out"},834 Env: []string{},835 Dir: "/",836 },837 },838 },839 Key: genKey(),840 }841 _, _, conn := StartAttachTether(t, &cfg, mocker)842 defer conn.Close()843 // wait for updates to occur844 <-testServer.updated845 if !testServer.enabled {846 t.Errorf("attach server was not enabled")847 }848 containerConfig := &ssh.ClientConfig{849 User: "daemon",850 HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {851 return nil852 },853 }854 // create the SSH client from the mocked connection855 sshConn, chans, reqs, err := ssh.NewClientConn(conn, "notappliable", containerConfig)856 assert.NoError(t, err)857 defer sshConn.Close()858 var sshSession communication.SessionInteractor859 done := make(chan bool)860 buf := &bytes.Buffer{}861 testBytes := []byte("\x1b[32mhello world\x1b[39m!\n")862 attachFunc := func() {863 attachClient := ssh.NewClient(sshConn, chans, reqs)864 if attachClient == nil {865 t.Errorf("Failed to get ssh.NewClient")866 }867 _, err = communication.ContainerIDs(sshConn)868 assert.NoError(t, err)869 version, err := communication.ContainerVersion(sshConn)870 assert.NoError(t, err)871 sshSession, err = 
communication.NewSSHInteraction(sshConn, cfg.ID, version)872 assert.NoError(t, err)873 sshSession.Unblock()874 stdout := sshSession.Stdout()875 // read from session into buffer876 go func() {877 io.CopyN(buf, stdout, int64(len(testBytes)))878 done <- true879 }()880 // write something to echo881 log.Debug("sending test data")882 sshSession.Stdin().Write(testBytes)883 log.Debug("sent test data")884 }885 limit := 10886 for i := 0; i <= limit; i++ {887 if i > 0 {888 // truncate the buffer for the retach889 buf.Reset()890 testBytes = []byte(fmt.Sprintf("\x1b[32mhello world - again %dth time \x1b[39m!\n", i))891 }892 // attach893 attachFunc()894 // wait for the close to propagate895 <-done896 // send close-stdin if this is the last iteration897 if i == limit {898 // exit899 sshSession.CloseStdin()900 } else {901 // detach902 sshSession.Stdin().Close()903 }904 assert.Equal(t, buf.Bytes(), testBytes)905 }906}...
vmware.go
Source:vmware.go
...
    merger.Add("console", tty)
    merger.Add("ssh", rpipe)
    return vmimpl.Multiplex(cmd, merger, tty, timeout, stop, inst.closed, inst.debug)
}

func (inst *instance) Diagnose() ([]byte, bool) {
    return nil, false
}
...
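In syzkaller, Diagnose is the hook a VM backend implements to contribute extra crash context after a crash is detected; the vmware backend above simply returns (nil, false), meaning it has nothing to add and the caller should not expect more console output. Below is a minimal sketch, not syzkaller's actual code, of a backend that does return something while keeping the Diagnose() ([]byte, bool) shape shown above; the instance fields, locking, and buffering are assumptions for illustration only.

// Hypothetical backend that buffers console output and returns it from Diagnose.
package example

import (
    "bytes"
    "sync"
)

type instance struct {
    mu         sync.Mutex
    consoleBuf bytes.Buffer // assumed to be filled by a console-reader goroutine
}

// Diagnose returns any extra crash context this backend has collected.
// The boolean mirrors the signature above and is assumed to tell the caller
// whether it should keep waiting for more output before finalizing the report.
func (inst *instance) Diagnose() ([]byte, bool) {
    inst.mu.Lock()
    defer inst.mu.Unlock()
    if inst.consoleBuf.Len() == 0 {
        // Nothing useful collected - same behaviour as the vmware backend above.
        return nil, false
    }
    // Return a copy so the caller can keep it after the instance shuts down.
    out := make([]byte, inst.consoleBuf.Len())
    copy(out, inst.consoleBuf.Bytes())
    return out, false
}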
Diagnose
Using AI Code Generation
package main

import (
    "fmt"
    "os"
)

func main() {
    if len(os.Args) != 2 {
        fmt.Println("Usage: go run 2.go <vmname>")
        os.Exit(1)
    }
    // The remainder of the generated example was truncated in the source.
}
Diagnose
Using AI Code Generation
// Note: this generated example does not compile as published. The line that
// parses os.Args[1] into a *url.URL was stripped by the page, the fields of the
// ManagedObjectReference were lost, and `m` (the object whose Diagnose method is
// called) is never defined; govmomi itself does not obviously expose a Diagnose
// call. The gaps are marked below; a compilable alternative using govmomi's
// DiagnosticManager is sketched after this block.
package main

import (
    "context"
    "fmt"
    "net/url"
    "os"
    "strings"

    "github.com/vmware/govmomi"
    "github.com/vmware/govmomi/vim25/types"
)

func main() {
    if len(os.Args) != 3 {
        fmt.Println("Usage: go run 2.go <ip> <username:password>")
        os.Exit(1)
    }
    u, err := url.Parse("https://" + os.Args[1] + "/sdk") // reconstructed: the original URL-parsing line was stripped
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    u.User = url.UserPassword(strings.Split(os.Args[2], ":")[0], strings.Split(os.Args[2], ":")[1])
    c, err := govmomi.NewClient(context.Background(), u, true)
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    _ = c // the generated code never uses the client it creates
    hs := types.ManagedObjectReference{
        // Type and Value (e.g. a HostSystem reference) were stripped from the source.
    }
    r, err := m.Diagnose(context.Background(), hs) // `m` is undefined in the generated snippet
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    fmt.Println(r)
}
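The snippet above reaches for a Diagnose call that govmomi does not obviously provide. As a hedged alternative, here is a minimal sketch built on govmomi's DiagnosticManager, which does exist in the library; the exact signatures assumed here (object.NewDiagnosticManager(c.Client) and QueryDescriptions(ctx, host), with a nil host meaning "the endpoint we connected to") should be checked against the govmomi version in use.

package main

import (
    "context"
    "fmt"
    "net/url"
    "os"
    "strings"

    "github.com/vmware/govmomi"
    "github.com/vmware/govmomi/object"
)

func main() {
    if len(os.Args) != 3 {
        fmt.Println("Usage: go run diag.go <ip> <username:password>")
        os.Exit(1)
    }
    ctx := context.Background()

    u, err := url.Parse("https://" + os.Args[1] + "/sdk")
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    creds := strings.SplitN(os.Args[2], ":", 2)
    u.User = url.UserPassword(creds[0], creds[1])

    c, err := govmomi.NewClient(ctx, u, true) // true = skip TLS verification
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }

    // DiagnosticManager wraps the vSphere API's log-browsing service.
    dm := object.NewDiagnosticManager(c.Client)

    // Ask the endpoint (ESXi host or vCenter) to describe its diagnostic logs.
    descs, err := dm.QueryDescriptions(ctx, nil)
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    for _, d := range descs {
        fmt.Printf("%s: %s\n", d.Key, d.FileName)
    }
}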
Diagnose
Using AI Code Generation
package main

// Assumes a package named vmware exposing New() and a Diagnose() method; the
// page never shows where it comes from (it is not part of syzkaller's vmware
// backend above), and without an import path this cannot compile as-is.
func main() {
    v := vmware.New()
    v.Diagnose()
}
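A minimal sketch of such a vmware helper package, purely an assumption to make the call pattern above self-contained; the package name, New, and Diagnose here are not from any real library, and the main above would still need to import it by its module path.

// Hypothetical support package for the example above.
package vmware

import "fmt"

// Vmware is a placeholder handle for whatever the example means to diagnose.
type Vmware struct{}

// New returns a ready-to-use handle.
func New() *Vmware {
    return &Vmware{}
}

// Diagnose prints a placeholder diagnostic message.
func (v *Vmware) Diagnose() {
    fmt.Println("Diagnosing vmware")
}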
Diagnose
Using AI Code Generation
// First variant: Diagnose defined on a value receiver.
package main

import "fmt"

type Vmware struct {
}

func (v Vmware) Diagnose() {
    fmt.Println("Diagnosing vmware")
}

func main() {
    var v Vmware // the declaration of v was missing from the published snippet
    v.Diagnose()
}

// Second variant: the same program with a pointer receiver; a local variable of
// type Vmware is addressable, so v.Diagnose() keeps working unchanged.
package main

import "fmt"

type Vmware struct {
}

func (v *Vmware) Diagnose() {
    fmt.Println("Diagnosing vmware")
}

func main() {
    var v Vmware
    v.Diagnose()
}